rte_eth_rx_descriptor_status() should be used as a replacement.
Signed-off-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Thomas Monjalon <thomas@monjalon.net>
--------------------
Supports checking the status of a Rx descriptor. When ``rx_descriptor_status`` is
-used, status can be "Available", "Done" or "Unavailable". When
-``rx_descriptor_done`` is used, status can be "DD bit is set" or "DD bit is
-not set".
+used, status can be "Available", "Done" or "Unavailable".
* **[implements] rte_eth_dev**: ``rx_descriptor_status``.
* **[related] API**: ``rte_eth_rx_descriptor_status()``.
-* **[implements] rte_eth_dev**: ``rx_descriptor_done``.
-* **[related] API**: ``rte_eth_rx_descriptor_done()``.
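
As a minimal usage sketch of the remaining API (the helper name and the
boolean mapping below are illustrative, not part of this patch):

#include <rte_ethdev.h>

/* Illustrative helper: returns 1 once the descriptor 'offset' entries
 * past the next one to be received has been filled by hardware, 0 if
 * it is still owned by hardware or lies beyond the ring, and a
 * negative errno on an invalid port or missing driver support.
 */
static int
rx_desc_ready(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	int status = rte_eth_rx_descriptor_status(port_id, queue_id, offset);

	switch (status) {
	case RTE_ETH_RX_DESC_DONE:
		return 1;
	case RTE_ETH_RX_DESC_AVAIL:
	case RTE_ETH_RX_DESC_UNAVAIL:
		return 0;
	default:
		return status; /* -ENODEV or -ENOTSUP */
	}
}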
.. _nic_features_tx_descriptor_status:
the device packet overhead can be calculated as:
``(struct rte_eth_dev_info).max_rx_pktlen - (struct rte_eth_dev_info).max_mtu``
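
For reference, a sketch of that calculation (the helper name and the
zero fallback on error are illustrative assumptions, not part of the patch):

#include <rte_ethdev.h>

/* Illustrative: per-packet L2 overhead the device adds on top of the MTU. */
static uint32_t
dev_packet_overhead(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;

	return dev_info.max_rx_pktlen - dev_info.max_mtu;
}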
-* ethdev: ``rx_descriptor_done`` dev_ops and ``rte_eth_rx_descriptor_done``
- will be removed in 21.11.
- Existing ``rte_eth_rx_descriptor_status`` and ``rte_eth_tx_descriptor_status``
- APIs can be used as replacement.
-
* ethdev: Announce moving from dedicated modify function for each field,
to using the general ``rte_flow_modify_field`` action.
``rte_eth_mirror_rule_reset`` along with the associated macros
``ETH_MIRROR_*`` are removed.
+* ethdev: Removed the ``rte_eth_rx_descriptor_done`` API function and its
+  driver callback ``rx_descriptor_done``. It is replaced by the more
+  complete function ``rte_eth_rx_descriptor_status``.
+
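
A minimal migration sketch for applications (the wrapper name is
hypothetical; note that the removed call could also return -ENODEV or
-ENOTSUP, which this comparison folds into 0):

#include <rte_ethdev.h>

static int
rx_desc_done_compat(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	/* Old (removed): rte_eth_rx_descriptor_done() returned 1 if the
	 * descriptor's DD bit was set, 0 otherwise. In the new API,
	 * "DD bit is set" corresponds to the RTE_ETH_RX_DESC_DONE state.
	 */
	return rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
	       RTE_ETH_RX_DESC_DONE;
}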
* i40e: Removed i40evf driver.
iavf already became the default VF driver for i40e devices,
so there is no need to maintain i40evf.
uint32_t eth_igb_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
-int eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset);
-
int eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset);
uint32_t eth_em_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
-int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
-
int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
eth_dev->dev_ops = &eth_em_ops;
eth_dev->rx_queue_count = eth_em_rx_queue_count;
- eth_dev->rx_descriptor_done = eth_em_rx_descriptor_done;
eth_dev->rx_descriptor_status = eth_em_rx_descriptor_status;
eth_dev->tx_descriptor_status = eth_em_tx_descriptor_status;
eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
return desc;
}
-int
-eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
- volatile struct e1000_rx_desc *rxdp;
- struct em_rx_queue *rxq = rx_queue;
- uint32_t desc;
-
- if (unlikely(offset >= rxq->nb_rx_desc))
- return 0;
- desc = rxq->rx_tail + offset;
- if (desc >= rxq->nb_rx_desc)
- desc -= rxq->nb_rx_desc;
-
- rxdp = &rxq->rx_ring[desc];
- return !!(rxdp->status & E1000_RXD_STAT_DD);
-}
-
int
eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
eth_dev->dev_ops = &eth_igb_ops;
eth_dev->rx_queue_count = eth_igb_rx_queue_count;
- eth_dev->rx_descriptor_done = eth_igb_rx_descriptor_done;
eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &igbvf_eth_dev_ops;
- eth_dev->rx_descriptor_done = eth_igb_rx_descriptor_done;
eth_dev->rx_descriptor_status = eth_igb_rx_descriptor_status;
eth_dev->tx_descriptor_status = eth_igb_tx_descriptor_status;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
return desc;
}
-int
-eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
- volatile union e1000_adv_rx_desc *rxdp;
- struct igb_rx_queue *rxq = rx_queue;
- uint32_t desc;
-
- if (unlikely(offset >= rxq->nb_rx_desc))
- return 0;
- desc = rxq->rx_tail + offset;
- if (desc >= rxq->nb_rx_desc)
- desc -= rxq->nb_rx_desc;
-
- rxdp = &rxq->rx_ring[desc];
- return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
-}
-
int
eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
uint32_t
fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-int
-fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
-
int
fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
dev->dev_ops = &fm10k_eth_dev_ops;
dev->rx_queue_count = fm10k_dev_rx_queue_count;
- dev->rx_descriptor_done = fm10k_dev_rx_descriptor_done;
dev->rx_descriptor_status = fm10k_dev_rx_descriptor_status;
dev->tx_descriptor_status = fm10k_dev_tx_descriptor_status;
dev->rx_pkt_burst = &fm10k_recv_pkts;
return desc;
}
-int
-fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
- volatile union fm10k_rx_desc *rxdp;
- struct fm10k_rx_queue *rxq = rx_queue;
- uint16_t desc;
- int ret;
-
- if (unlikely(offset >= rxq->nb_desc)) {
- PMD_DRV_LOG(ERR, "Invalid RX descriptor offset %u", offset);
- return 0;
- }
-
- desc = rxq->next_dd + offset;
- if (desc >= rxq->nb_desc)
- desc -= rxq->nb_desc;
-
- rxdp = &rxq->hw_ring[desc];
-
- ret = !!(rxdp->w.status &
- rte_cpu_to_le_16(FM10K_RXD_STATUS_DD));
-
- return ret;
-}
-
int
fm10k_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
dev->dev_ops = &i40e_eth_dev_ops;
dev->rx_queue_count = i40e_dev_rx_queue_count;
- dev->rx_descriptor_done = i40e_dev_rx_descriptor_done;
dev->rx_descriptor_status = i40e_dev_rx_descriptor_status;
dev->tx_descriptor_status = i40e_dev_tx_descriptor_status;
dev->rx_pkt_burst = i40e_recv_pkts;
return desc;
}
-int
-i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
- volatile union i40e_rx_desc *rxdp;
- struct i40e_rx_queue *rxq = rx_queue;
- uint16_t desc;
- int ret;
-
- if (unlikely(offset >= rxq->nb_rx_desc)) {
- PMD_DRV_LOG(ERR, "Invalid RX descriptor id %u", offset);
- return 0;
- }
-
- desc = rxq->rx_tail + offset;
- if (desc >= rxq->nb_rx_desc)
- desc -= rxq->nb_rx_desc;
-
- rxdp = &(rxq->rx_ring[desc]);
-
- ret = !!(((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
- I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT) &
- (1 << I40E_RX_DESC_STATUS_DD_SHIFT));
-
- return ret;
-}
-
int
i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
uint32_t i40e_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
-int i40e_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
int i40e_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
PMD_INIT_FUNC_TRACE();
dev->dev_ops = &eth_igc_ops;
- dev->rx_descriptor_done = eth_igc_rx_descriptor_done;
dev->rx_queue_count = eth_igc_rx_queue_count;
dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
dev->tx_descriptor_status = eth_igc_tx_descriptor_status;
return desc;
}
-int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
- volatile union igc_adv_rx_desc *rxdp;
- struct igc_rx_queue *rxq = rx_queue;
- uint32_t desc;
-
- if (unlikely(!rxq || offset >= rxq->nb_rx_desc))
- return 0;
-
- desc = rxq->rx_tail + offset;
- if (desc >= rxq->nb_rx_desc)
- desc -= rxq->nb_rx_desc;
-
- rxdp = &rxq->rx_ring[desc];
- return !!(rxdp->wb.upper.status_error &
- rte_cpu_to_le_32(IGC_RXD_STAT_DD));
-}
-
int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
struct igc_rx_queue *rxq = rx_queue;
uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
-int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset);
-
int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset);
int eth_igc_tx_descriptor_status(void *tx_queue, uint16_t offset);
eth_dev->dev_ops = &ixgbe_eth_dev_ops;
eth_dev->rx_queue_count = ixgbe_dev_rx_queue_count;
- eth_dev->rx_descriptor_done = ixgbe_dev_rx_descriptor_done;
eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &ixgbevf_eth_dev_ops;
- eth_dev->rx_descriptor_done = ixgbe_dev_rx_descriptor_done;
eth_dev->rx_descriptor_status = ixgbe_dev_rx_descriptor_status;
eth_dev->tx_descriptor_status = ixgbe_dev_tx_descriptor_status;
eth_dev->rx_pkt_burst = &ixgbe_recv_pkts;
uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
-int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
-
int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
return desc;
}
-int
-ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
- volatile union ixgbe_adv_rx_desc *rxdp;
- struct ixgbe_rx_queue *rxq = rx_queue;
- uint32_t desc;
-
- if (unlikely(offset >= rxq->nb_rx_desc))
- return 0;
- desc = rxq->rx_tail + offset;
- if (desc >= rxq->nb_rx_desc)
- desc -= rxq->nb_rx_desc;
-
- rxdp = &rxq->rx_ring[desc];
- return !!(rxdp->wb.upper.status_error &
- rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD));
-}
-
int
ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
int rc, max_entries;
eth_dev->dev_ops = &otx2_eth_dev_ops;
- eth_dev->rx_descriptor_done = otx2_nix_rx_descriptor_done;
eth_dev->rx_queue_count = otx2_nix_rx_queue_count;
eth_dev->rx_descriptor_status = otx2_nix_rx_descriptor_status;
eth_dev->tx_descriptor_status = otx2_nix_tx_descriptor_status;
struct rte_eth_burst_mode *mode);
uint32_t otx2_nix_rx_queue_count(struct rte_eth_dev *eth_dev, uint16_t qidx);
int otx2_nix_tx_done_cleanup(void *txq, uint32_t free_cnt);
-int otx2_nix_rx_descriptor_done(void *rxq, uint16_t offset);
int otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset);
int otx2_nix_tx_descriptor_status(void *tx_queue, uint16_t offset);
return 0;
}
-int
-otx2_nix_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
- struct otx2_eth_rxq *rxq = rx_queue;
- uint32_t head, tail;
-
- nix_rx_head_tail_get(otx2_eth_pmd_priv(rxq->eth_dev),
- &head, &tail, rxq->rq);
-
- return nix_offset_has_packet(head, tail, offset);
-}
-
int
otx2_nix_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
return sap->dp_rx->qdesc_npending(rxq_info->dp);
}
-/*
- * The function is used by the secondary process as well. It must not
- * use any process-local pointers from the adapter data.
- */
-static int
-sfc_rx_descriptor_done(void *queue, uint16_t offset)
-{
- struct sfc_dp_rxq *dp_rxq = queue;
- const struct sfc_dp_rx *dp_rx;
-
- dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
-
- return offset < dp_rx->qdesc_npending(dp_rxq);
-}
-
/*
* The function is used by the secondary process as well. It must not
* use any process-local pointers from the adapter data.
dev->tx_pkt_burst = dp_tx->pkt_burst;
dev->rx_queue_count = sfc_rx_queue_count;
- dev->rx_descriptor_done = sfc_rx_descriptor_done;
dev->rx_descriptor_status = sfc_rx_descriptor_status;
dev->tx_descriptor_status = sfc_tx_descriptor_status;
dev->dev_ops = &sfc_eth_dev_ops;
dev->tx_pkt_prepare = dp_tx->pkt_prepare;
dev->tx_pkt_burst = dp_tx->pkt_burst;
dev->rx_queue_count = sfc_rx_queue_count;
- dev->rx_descriptor_done = sfc_rx_descriptor_done;
dev->rx_descriptor_status = sfc_rx_descriptor_status;
dev->tx_descriptor_status = sfc_tx_descriptor_status;
dev->dev_ops = &sfc_eth_dev_secondary_ops;
}
eth_dev->dev_ops = &virtio_eth_dev_ops;
- eth_dev->rx_descriptor_done = virtio_dev_rx_queue_done;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
set_rxtx_funcs(eth_dev);
* RX/TX function prototypes
*/
-int virtio_dev_rx_queue_done(void *rxq, uint16_t offset);
-
int virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
const struct rte_eth_rxconf *rx_conf,
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif
-int
-virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
-{
- struct virtnet_rx *rxvq = rxq;
- struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
-
- return virtqueue_nused(vq) >= offset;
-}
-
void
vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
{
eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
eth_dev->rx_queue_count = NULL;
- eth_dev->rx_descriptor_done = NULL;
eth_dev->rx_descriptor_status = NULL;
eth_dev->tx_descriptor_status = NULL;
eth_dev->dev_ops = NULL;
return (int)(*dev->rx_queue_count)(dev, queue_id);
}
-/**
- * Check if the DD bit of the specific RX descriptor in the queue has been set
- *
- * @param port_id
- * The port identifier of the Ethernet device.
- * @param queue_id
- * The queue id on the specific port.
- * @param offset
- * The offset of the descriptor ID from tail.
- * @return
- * - (1) if the specific DD bit is set.
- * - (0) if the specific DD bit is not set.
- * - (-ENODEV) if *port_id* invalid.
- * - (-ENOTSUP) if the device does not support this function
- */
-__rte_deprecated
-static inline int
-rte_eth_rx_descriptor_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
-{
- struct rte_eth_dev *dev = &rte_eth_devices[port_id];
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- RTE_FUNC_PTR_OR_ERR_RET(*dev->rx_descriptor_done, -ENOTSUP);
- return (*dev->rx_descriptor_done)(dev->data->rx_queues[queue_id], offset);
-}
-
/**@{@name Rx hardware descriptor states
* @see rte_eth_rx_descriptor_status
*/
uint16_t rx_queue_id);
/**< @internal Get number of used descriptors on a receive queue. */
-typedef int (*eth_rx_descriptor_done_t)(void *rxq, uint16_t offset);
-/**< @internal Check DD bit of specific RX descriptor */
-
typedef int (*eth_rx_descriptor_status_t)(void *rxq, uint16_t offset);
/**< @internal Check the status of a Rx descriptor */
eth_tx_prep_t tx_pkt_prepare; /**< Pointer to PMD transmit prepare function. */
eth_rx_queue_count_t rx_queue_count; /**< Get the number of used RX descriptors. */
- eth_rx_descriptor_done_t rx_descriptor_done; /**< Check rxd DD bit. */
eth_rx_descriptor_status_t rx_descriptor_status; /**< Check the status of a Rx descriptor. */
eth_tx_descriptor_status_t tx_descriptor_status; /**< Check the status of a Tx descriptor. */