These functions are useful to applications for monitoring queue state
and for debugging. The netvsc PMD also transparently handles the Rx/Tx
descriptor status functions for the underlying VF device.
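
For example, an application can reach these callbacks through the
generic ethdev wrappers. A minimal sketch (port_id and queue_id are
placeholder values, not part of this patch):

	#include <stdio.h>
	#include <rte_ethdev.h>

	/* packets staged by the PMD but not yet read by rx_burst */
	int backlog = rte_eth_rx_queue_count(port_id, queue_id);
	printf("%d packets pending\n", backlog);

	if (rte_eth_rx_descriptor_status(port_id, queue_id, 0) ==
	    RTE_ETH_RX_DESC_DONE)
		printf("data waiting in the staging ring\n");

	if (rte_eth_tx_descriptor_status(port_id, queue_id, 0) ==
	    RTE_ETH_TX_DESC_DONE)
		printf("first Tx descriptor is available\n");
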
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
.tx_queue_setup = hn_dev_tx_queue_setup,
.tx_queue_release = hn_dev_tx_queue_release,
.tx_done_cleanup = hn_dev_tx_done_cleanup,
+ .tx_descriptor_status = hn_dev_tx_descriptor_status,
.rx_queue_setup = hn_dev_rx_queue_setup,
.rx_queue_release = hn_dev_rx_queue_release,
+ .rx_queue_count = hn_dev_rx_queue_count,
+ .rx_descriptor_status = hn_dev_rx_queue_status,
.link_update = hn_dev_link_update,
.stats_get = hn_dev_stats_get,
.stats_reset = hn_dev_stats_reset,
rte_free(txq);
}
+/*
+ * Check the status of a Tx descriptor in the queue.
+ *
+ * returns:
+ *  - -EINVAL              - offset outside of Tx descriptor pool.
+ *  - RTE_ETH_TX_DESC_FULL - descriptor is not acknowledged by host.
+ *  - RTE_ETH_TX_DESC_DONE - descriptor is available.
+ */
+int
+hn_dev_tx_descriptor_status(void *arg, uint16_t offset)
+{
+ const struct hn_tx_queue *txq = arg;
+
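+ /* Process pending host completions so the pool counters are current. */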
+ hn_process_events(txq->hv, txq->queue_id, 0);
+
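+ /* In-use pool elements are packets not yet acknowledged by the host. */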
+ if (offset >= rte_mempool_avail_count(txq->txdesc_pool))
+ return -EINVAL;
+
+ if (offset < rte_mempool_in_use_count(txq->txdesc_pool))
+ return RTE_ETH_TX_DESC_FULL;
+ else
+ return RTE_ETH_TX_DESC_DONE;
+}
+
static void
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
hn_rx_queue_free(rxq, true);
}
+/*
+ * Get the number of used descriptors in an Rx queue.
+ * For this device, that means the number of packets pending in the
+ * staging ring.
+ */
+uint32_t
+hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct hn_rx_queue *rxq = dev->data->rx_queues[queue_id];
+
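+ /* Packets are staged in rx_ring until the receive burst drains them. */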
+ return rte_ring_count(rxq->rx_ring);
+}
+
+/*
+ * Check the status of an Rx descriptor in the queue.
+ *
+ * returns:
+ *  - -EINVAL               - offset outside of ring.
+ *  - RTE_ETH_RX_DESC_AVAIL - no data available yet.
+ *  - RTE_ETH_RX_DESC_DONE  - data is waiting in the staging ring.
+ */
+int
+hn_dev_rx_queue_status(void *arg, uint16_t offset)
+{
+ const struct hn_rx_queue *rxq = arg;
+
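+ /* Poll the channel so newly arrived packets reach the staging ring. */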
+ hn_process_events(rxq->hv, rxq->queue_id, 0);
+ if (offset >= rxq->rx_ring->capacity)
+ return -EINVAL;
+
+ if (offset < rte_ring_count(rxq->rx_ring))
+ return RTE_ETH_RX_DESC_DONE;
+ else
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
int
hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
{
void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
struct rte_eth_txq_info *qinfo);
int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
+int hn_dev_tx_descriptor_status(void *arg, uint16_t offset);
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
uint16_t queue_id,
void hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
struct rte_eth_rxq_info *qinfo);
void hn_dev_rx_queue_release(void *arg);
+uint32_t hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id);
+int hn_dev_rx_queue_status(void *rxq, uint16_t offset);
void hn_dev_free_queues(struct rte_eth_dev *dev);
/* Check if VF is attached */
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf);
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
+int hn_vf_tx_queue_status(struct hn_data *hv, uint16_t queue_id, uint16_t offset);
+
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t nb_desc,
unsigned int socket_id,