int eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset);
+int eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset);
+
int eth_em_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
.rx_queue_release = eth_em_rx_queue_release,
.rx_queue_count = eth_em_rx_queue_count,
.rx_descriptor_done = eth_em_rx_descriptor_done,
+ .rx_descriptor_status = eth_em_rx_descriptor_status,
+ .tx_descriptor_status = eth_em_tx_descriptor_status,
.tx_queue_setup = eth_em_tx_queue_setup,
.tx_queue_release = eth_em_tx_queue_release,
.rx_queue_intr_enable = eth_em_rx_queue_intr_enable,
return !!(rxdp->status & E1000_RXD_STAT_DD);
}
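+
+/*
+ * Report the status of the Rx descriptor located 'offset' entries past
+ * the next descriptor to be processed by the driver (rx_tail): DONE if
+ * it holds a received packet, AVAIL if it is still waiting for hardware
+ * to fill it, UNAVAIL if it cannot be used, or -EINVAL if the offset is
+ * out of range.
+ */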
+int
+eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ struct em_rx_queue *rxq = rx_queue;
+ volatile uint8_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= rxq->nb_rx_desc))
+ return -EINVAL;
+
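+	/*
+	 * The last nb_rx_hold descriptors of the ring are still held by
+	 * the driver and have not been handed back to hardware yet, so
+	 * they cannot receive a packet.
+	 */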
+ if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+
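+	/* wrap the descriptor index around the end of the ring */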
+ desc = rxq->rx_tail + offset;
+ if (desc >= rxq->nb_rx_desc)
+ desc -= rxq->nb_rx_desc;
+
+ status = &rxq->rx_ring[desc].status;
+ if (*status & E1000_RXD_STAT_DD)
+ return RTE_ETH_RX_DESC_DONE;
+
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
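+/*
+ * Report the status of the Tx descriptor located 'offset' entries past
+ * the ring tail (the slot the next transmitted packet would use): DONE
+ * if hardware has finished with it and it can be reused, FULL if it is
+ * still in use, or -EINVAL if the offset is out of range.
+ */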
+int
+eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset)
+{
+ struct em_tx_queue *txq = tx_queue;
+ volatile uint8_t *status;
+ uint32_t desc;
+
+ if (unlikely(offset >= txq->nb_tx_desc))
+ return -EINVAL;
+
+ desc = txq->tx_tail + offset;
+ /* go to next desc that has the RS bit */
+ desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
+ txq->tx_rs_thresh;
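+	/*
+	 * The index can exceed the ring size (tx_tail + offset may already
+	 * wrap, and the round-up above can push it further), so bring it
+	 * back into range.
+	 */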
+ if (desc >= txq->nb_tx_desc) {
+ desc -= txq->nb_tx_desc;
+ if (desc >= txq->nb_tx_desc)
+ desc -= txq->nb_tx_desc;
+ }
+
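+	/*
+	 * If the DD bit is set on this RS descriptor, hardware has
+	 * completed every descriptor up to and including it.
+	 */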
+ status = &txq->tx_ring[desc].upper.fields.status;
+ if (*status & E1000_TXD_STAT_DD)
+ return RTE_ETH_TX_DESC_DONE;
+
+ return RTE_ETH_TX_DESC_FULL;
+}
+
void
em_dev_clear_queues(struct rte_eth_dev *dev)
{