return err;
}
+void
+hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct hn_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->txdesc_pool->size;
+ qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
+}
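
For reference, these fields surface to applications through the generic ethdev
wrapper; a minimal caller-side sketch (port_id and the queue number are
hypothetical, not part of the patch):

	struct rte_eth_txq_info info;

	/* Returns 0 on success; fills nb_desc and conf.offloads as set above */
	int rc = rte_eth_tx_queue_info_get(port_id, 0, &info);

The Rx counterpart added below is reached the same way via
rte_eth_rx_queue_info_get().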
static struct hn_txdesc *hn_txd_get(struct hn_tx_queue *txq)
{
rte_free(txq);
}
+/*
+ * Check the status of a Tx descriptor in the queue.
+ *
+ * returns:
+ * - -EINVAL - offset outside of the Tx descriptor pool.
+ * - RTE_ETH_TX_DESC_FULL - descriptor is not acknowledged by host.
+ * - RTE_ETH_TX_DESC_DONE - descriptor is available.
+ */
+int hn_dev_tx_descriptor_status(void *arg, uint16_t offset)
+{
+ const struct hn_tx_queue *txq = arg;
+
+ hn_process_events(txq->hv, txq->queue_id, 0);
+
+ if (offset >= rte_mempool_avail_count(txq->txdesc_pool))
+ return -EINVAL;
+
+ if (offset < rte_mempool_in_use_count(txq->txdesc_pool))
+ return RTE_ETH_TX_DESC_FULL;
+ else
+ return RTE_ETH_TX_DESC_DONE;
+}
+
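
A caller reaches this through rte_eth_tx_descriptor_status(); a hedged sketch
of waiting for the oldest in-flight slot to be released (port_id and queue_id
are hypothetical):

	/* Spin until the host acks the oldest in-flight Tx descriptor */
	while (rte_eth_tx_descriptor_status(port_id, queue_id, 0) ==
	       RTE_ETH_TX_DESC_FULL)
		rte_pause();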
static void
hn_nvs_send_completed(struct rte_eth_dev *dev, uint16_t queue_id,
unsigned long xactid, const struct hn_nvs_rndis_ack *ack)
txq->stats.bytes += txd->data_size;
txq->stats.packets += txd->packets;
} else {
- PMD_TX_LOG(NOTICE, "port %u:%u complete tx %u failed status %u",
- txq->port_id, txq->queue_id, txd->chim_index, ack->status);
+ PMD_DRV_LOG(NOTICE, "port %u:%u complete tx %u failed status %u",
+ txq->port_id, txq->queue_id, txd->chim_index, ack->status);
++txq->stats.errors;
}
break;
default:
- PMD_TX_LOG(NOTICE,
- "unexpected send completion type %u",
+ PMD_DRV_LOG(NOTICE, "unexpected send completion type %u",
hdr->type);
}
}
if (unlikely(rte_ring_sp_enqueue(rxq->rx_ring, m) != 0)) {
++rxq->stats.ring_full;
+ PMD_RX_LOG(DEBUG, "rx ring full");
rte_pktmbuf_free(m);
}
}
return rxq;
}
+void
+hn_dev_rx_queue_info(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct hn_rx_queue *rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->nb_desc = rxq->rx_ring->size;
+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
+}
+
int
hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx, uint16_t nb_desc,
hn_rx_queue_free(rxq, true);
}
+/*
+ * Get the number of used descriptors in an Rx queue.
+ * For this device, that is the number of packets pending in the staging ring.
+ */
+uint32_t
+hn_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct hn_rx_queue *rxq = dev->data->rx_queues[queue_id];
+
+ return rte_ring_count(rxq->rx_ring);
+}
+
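
Because the count maps one-to-one to staged packets, an application can use
the generic rte_eth_rx_queue_count() as a cheap backlog probe before sizing a
burst; a minimal sketch (pkts and MAX_BURST are hypothetical):

	/* A negative return means an invalid or unsupported port/queue */
	int backlog = rte_eth_rx_queue_count(port_id, queue_id);

	if (backlog > 0)
		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts,
					 RTE_MIN(backlog, MAX_BURST));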
+/*
+ * Check the status of an Rx descriptor in the queue.
+ *
+ * returns:
+ * - -EINVAL - offset outside of ring
+ * - RTE_ETH_RX_DESC_AVAIL - no data available yet
+ * - RTE_ETH_RX_DESC_DONE - data is waiting in the staging ring
+ */
+int hn_dev_rx_queue_status(void *arg, uint16_t offset)
+{
+ const struct hn_rx_queue *rxq = arg;
+
+ hn_process_events(rxq->hv, rxq->queue_id, 0);
+ if (offset >= rxq->rx_ring->capacity)
+ return -EINVAL;
+
+ if (offset < rte_ring_count(rxq->rx_ring))
+ return RTE_ETH_RX_DESC_DONE;
+ else
+ return RTE_ETH_RX_DESC_AVAIL;
+}
+
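
As on the Tx side, this is consumed through the generic wrapper; a sketch that
counts how many staged slots currently hold data (names hypothetical):

	uint16_t off;

	/* Count contiguous DONE slots from the head of the staging ring;
	 * the loop ends on AVAIL or on -EINVAL past the ring capacity.
	 */
	for (off = 0; ; off++)
		if (rte_eth_rx_descriptor_status(port_id, queue_id, off) !=
		    RTE_ETH_RX_DESC_DONE)
			break;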
int
hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt)
{
if (tx_limit && tx_done >= tx_limit)
break;
-
- if (rxq->rx_ring && rte_ring_full(rxq->rx_ring))
- break;
}
if (bytes_read > 0)
if (likely(ret == 0))
hn_reset_txagg(txq);
- else
- PMD_TX_LOG(NOTICE, "port %u:%u send failed: %d",
- txq->port_id, txq->queue_id, ret);
+ else if (ret == -EAGAIN) {
+ PMD_TX_LOG(DEBUG, "port %u:%u channel full",
+ txq->port_id, txq->queue_id);
+ ++txq->stats.channel_full;
+ } else {
+ ++txq->stats.errors;
+ PMD_DRV_LOG(NOTICE, "port %u:%u send failed: %d",
+ txq->port_id, txq->queue_id, ret);
+ }
return ret;
}
struct hn_data *hv = txq->hv;
struct rte_eth_dev *vf_dev;
bool need_sig = false;
- uint16_t nb_tx, avail;
+ uint16_t nb_tx, tx_thresh;
int ret;
if (unlikely(hv->closed))
return 0;
+ /*
+ * Always check for events on the primary channel
+ * because that is where hotplug notifications occur.
+ */
+ tx_thresh = RTE_MAX(txq->free_thresh, nb_pkts);
+ if (txq->queue_id == 0 ||
+ rte_mempool_avail_count(txq->txdesc_pool) < tx_thresh)
+ hn_process_events(hv, txq->queue_id, 0);
+
/* Transmit over VF if present and up */
+ rte_rwlock_read_lock(&hv->vf_lock);
vf_dev = hn_get_vf_dev(hv);
-
if (vf_dev && vf_dev->data->dev_started) {
void *sub_q = vf_dev->data->tx_queues[queue_id];
- return (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+ nb_tx = (*vf_dev->tx_pkt_burst)(sub_q, tx_pkts, nb_pkts);
+ rte_rwlock_read_unlock(&hv->vf_lock);
+ return nb_tx;
}
-
- avail = rte_mempool_avail_count(txq->txdesc_pool);
- if (nb_pkts > avail || avail <= txq->free_thresh)
- hn_process_events(hv, txq->queue_id, 0);
+ rte_rwlock_read_unlock(&hv->vf_lock);
for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
struct rte_mbuf *m = tx_pkts[nb_tx];
ret = hn_xmit_sg(txq, txd, m, &need_sig);
if (unlikely(ret != 0)) {
- PMD_TX_LOG(NOTICE, "sg send failed: %d", ret);
- ++txq->stats.errors;
+ if (ret == -EAGAIN) {
+ PMD_TX_LOG(DEBUG, "sg channel full");
+ ++txq->stats.channel_full;
+ } else {
+ PMD_DRV_LOG(NOTICE, "sg send failed: %d", ret);
+ ++txq->stats.errors;
+ }
hn_txd_put(txq, txd);
goto fail;
}
if (unlikely(hv->closed))
return 0;
- /* Receive from VF if present and up */
- vf_dev = hn_get_vf_dev(hv);
-
- /* Check for new completions */
+	/* Check for new completions (and hotplug events) */
if (likely(rte_ring_count(rxq->rx_ring) < nb_pkts))
hn_process_events(hv, rxq->queue_id, 0);
(void **)rx_pkts, nb_pkts, NULL);
/* If VF is available, check that as well */
+ rte_rwlock_read_lock(&hv->vf_lock);
+ vf_dev = hn_get_vf_dev(hv);
if (vf_dev && vf_dev->data->dev_started)
nb_rcv += hn_recv_vf(vf_dev->data->port_id, rxq,
rx_pkts + nb_rcv, nb_pkts - nb_rcv);
+ rte_rwlock_read_unlock(&hv->vf_lock);
return nb_rcv;
}