#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
+#include <rte_byteorder.h>
#include "virtio_logs.h"
#include "virtio_ethdev.h"
return i;
}
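+/* Default TX free threshold when the application does not provide one. */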
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+/* Clean up descriptor chains completed by the host and free their mbufs. */
static void
-virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
+virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
{
- struct vring_used_elem *uep;
- uint16_t used_idx, desc_idx;
+ uint16_t i, used_idx, desc_idx;
+ for (i = 0; i < num; i++) {
+ struct vring_used_elem *uep;
+ struct vq_desc_extra *dxp;
+
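+		/* Ring size is a power of two, so masking wraps the consumer index. */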
+ used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+
+ desc_idx = (uint16_t) uep->id;
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
- used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
- uep = &vq->vq_ring.used->ring[used_idx];
- desc_idx = (uint16_t) uep->id;
- vq->vq_used_cons_idx++;
- vq_ring_free_chain(vq, desc_idx);
+ if (dxp->cookie != NULL) {
+ rte_pktmbuf_free(dxp->cookie);
+ dxp->cookie = NULL;
+ }
+ }
}
idx = head_idx;
dxp = &txvq->vq_descx[idx];
- if (dxp->cookie != NULL)
- rte_pktmbuf_free(dxp->cookie);
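+	/* Any mbuf previously attached to this slot was freed in virtio_xmit_cleanup(). */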
dxp->cookie = (void *)cookie;
dxp->ndescs = needed;
* Reinitialise since virtio port might have been stopped and restarted
*/
memset(vq->vq_ring_virt_mem, 0, vq->vq_ring_size);
- vring_init(vr, size, ring_mem, vq->vq_alignment);
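+	/* Use the 4096-byte alignment required by the legacy virtio PCI spec. */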
+ vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
vq->vq_used_cons_idx = 0;
vq->vq_desc_head_idx = 0;
vq->vq_avail_idx = 0;
void
virtio_dev_cq_start(struct rte_eth_dev *dev)
{
- struct virtio_hw *hw
- = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct virtio_hw *hw = dev->data->dev_private;
- virtio_dev_vring_start(hw->cvq, VTNET_CQ);
- VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
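+	/* The control queue only exists if VIRTIO_NET_F_CTRL_VQ was negotiated. */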
+ if (hw->cvq) {
+ virtio_dev_vring_start(hw->cvq, VTNET_CQ);
+ VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
+ }
}
void
{
uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
struct virtqueue *vq;
+ uint16_t tx_free_thresh;
int ret;
PMD_INIT_FUNC_TRACE();
- if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS)
- != ETH_TXQ_FLAGS_NOOFFLOADS) {
+ if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMS)
+ != ETH_TXQ_FLAGS_NOXSUMS) {
PMD_INIT_LOG(ERR, "TX checksum offload not supported\n");
return -EINVAL;
}
return ret;
}
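+	/* By default, clean up once a quarter of the ring is used, capped at DEFAULT_TX_FREE_THRESH. */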
+ tx_free_thresh = tx_conf->tx_free_thresh;
+ if (tx_free_thresh == 0)
+ tx_free_thresh =
+ RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
+
+ if (tx_free_thresh >= (vq->vq_nentries - 3)) {
+ RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
+ "number of TX entries minus 3 (%u)."
+ " (tx_free_thresh=%u port=%u queue=%u)\n",
+ vq->vq_nentries - 3,
+ tx_free_thresh, dev->data->port_id, queue_idx);
+ return -EINVAL;
+ }
+
+ vq->vq_free_thresh = tx_free_thresh;
+
dev->data->tx_queues[queue_idx] = vq;
return 0;
}
}
#define VIRTIO_MBUF_BURST_SZ 64
-#define DESC_PER_CACHELINE (CACHE_LINE_SIZE / sizeof(struct vring_desc))
+#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
uint16_t
virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct virtqueue *rxvq = rx_queue;
+ struct virtio_hw *hw;
struct rte_mbuf *rxm, *new_mbuf;
- uint16_t nb_used, num, nb_rx = 0;
+ uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
int error;
- uint32_t i, nb_enqueued = 0;
+ uint32_t i, nb_enqueued;
const uint32_t hdr_size = sizeof(struct virtio_net_hdr);
nb_used = VIRTQUEUE_NUSED(rxvq);
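+	/* Read barrier: ensure used ring entries are read after the used index. */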
- rmb();
+ virtio_rmb();
num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
+
+ hw = rxvq->hw;
+ nb_rx = 0;
+ nb_enqueued = 0;
+
for (i = 0; i < num ; i++) {
rxm = rcv_pkts[i];
rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
rxm->data_len = (uint16_t)(len[i] - hdr_size);
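+		/* The host does not strip VLAN tags, so strip them in software if requested. */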
+ if (hw->vlan_strip)
+ rte_vlan_strip(rxm);
+
VIRTIO_DUMP_PACKET(rxm, rxm->data_len);
rx_pkts[nb_rx++] = rxm;
}
if (likely(nb_enqueued)) {
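+		/* Publish refilled buffers before deciding whether to notify the host. */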
+ vq_update_avail_idx(rxvq);
+
if (unlikely(virtqueue_kick_prepare(rxvq))) {
virtqueue_notify(rxvq);
PMD_RX_LOG(DEBUG, "Notified\n");
}
}
- vq_update_avail_idx(rxvq);
-
return nb_rx;
}
uint16_t nb_pkts)
{
struct virtqueue *rxvq = rx_queue;
+ struct virtio_hw *hw;
struct rte_mbuf *rxm, *new_mbuf;
- uint16_t nb_used, num, nb_rx = 0;
+ uint16_t nb_used, num, nb_rx;
uint32_t len[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
struct rte_mbuf *prev;
int error;
- uint32_t i = 0, nb_enqueued = 0;
- uint32_t seg_num = 0;
- uint16_t extra_idx = 0;
- uint32_t seg_res = 0;
+ uint32_t i, nb_enqueued;
+ uint32_t seg_num;
+ uint16_t extra_idx;
+ uint32_t seg_res;
const uint32_t hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
nb_used = VIRTQUEUE_NUSED(rxvq);
- rmb();
+ virtio_rmb();
if (nb_used == 0)
return 0;
PMD_RX_LOG(DEBUG, "used:%d\n", nb_used);
+ hw = rxvq->hw;
+ nb_rx = 0;
+ i = 0;
+ nb_enqueued = 0;
+ seg_num = 0;
+ extra_idx = 0;
+ seg_res = 0;
+
while (i < nb_used) {
struct virtio_net_hdr_mrg_rxbuf *header;
seg_res -= rcv_cnt;
}
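+		/* As in the non-mergeable path, strip the VLAN tag in software if requested. */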
+ if (hw->vlan_strip)
+ rte_vlan_strip(rx_pkts[nb_rx]);
+
VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
rx_pkts[nb_rx]->data_len);
}
if (likely(nb_enqueued)) {
+ vq_update_avail_idx(rxvq);
+
if (unlikely(virtqueue_kick_prepare(rxvq))) {
virtqueue_notify(rxvq);
PMD_RX_LOG(DEBUG, "Notified");
}
}
- vq_update_avail_idx(rxvq);
-
return nb_rx;
}
{
struct virtqueue *txvq = tx_queue;
struct rte_mbuf *txm;
- uint16_t nb_used, nb_tx, num;
+ uint16_t nb_used, nb_tx;
int error;
- nb_tx = 0;
-
if (unlikely(nb_pkts < 1))
return nb_pkts;
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(txvq);
- rmb();
+ virtio_rmb();
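+	/* Reclaim completed descriptors once used entries exceed the free threshold. */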
+ if (likely(nb_used > txvq->vq_free_thresh))
+ virtio_xmit_cleanup(txvq, nb_used);
- num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ);
+ nb_tx = 0;
while (nb_tx < nb_pkts) {
- int need = tx_pkts[nb_tx]->nb_segs - txvq->vq_free_cnt;
- int deq_cnt = RTE_MIN(need, (int)num);
-
- num -= (deq_cnt > 0) ? deq_cnt : 0;
- while (deq_cnt > 0) {
- virtqueue_dequeue_pkt_tx(txvq);
- deq_cnt--;
+		/* Need one extra descriptor for the virtio net header. */
+ int need = tx_pkts[nb_tx]->nb_segs - txvq->vq_free_cnt + 1;
+
+		/* A positive value indicates free vring descriptors are still needed. */
+ if (unlikely(need > 0)) {
+ nb_used = VIRTQUEUE_NUSED(txvq);
+ virtio_rmb();
+ need = RTE_MIN(need, (int)nb_used);
+
+ virtio_xmit_cleanup(txvq, need);
+ need = (int)tx_pkts[nb_tx]->nb_segs -
+ txvq->vq_free_cnt + 1;
}
- if (tx_pkts[nb_tx]->nb_segs <= txvq->vq_free_cnt) {
+		/*
+		 * A zero or negative value indicates there are enough
+		 * free descriptors to transmit this packet.
+		 */
+ if (likely(need <= 0)) {
txm = tx_pkts[nb_tx];
+
+ /* Do VLAN tag insertion */
+ if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+ error = rte_vlan_insert(&txm);
+ if (unlikely(error)) {
+ rte_pktmbuf_free(txm);
+ ++nb_tx;
+ continue;
+ }
+ }
+
/* Enqueue Packet buffers */
error = virtqueue_enqueue_xmit(txvq, txm);
if (unlikely(error)) {
break;
}
}
- vq_update_avail_idx(txvq);
txvq->packets += nb_tx;
- if (unlikely(virtqueue_kick_prepare(txvq))) {
- virtqueue_notify(txvq);
- PMD_TX_LOG(DEBUG, "Notified backend after xmit");
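+	/* Update the avail index and kick the host only if packets were actually enqueued. */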
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(txvq);
+
+ if (unlikely(virtqueue_kick_prepare(txvq))) {
+ virtqueue_notify(txvq);
+ PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+ }
}
return nb_tx;