X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=lib%2Flibrte_pmd_virtio%2Fvirtio_rxtx.c;h=3ff275cba701f4c4864afb31138a38767f6f2d77;hb=7ada00cb0dcfe402651c2b361f7f34d0ec648556;hp=adda2a8b7189f828940fe78260ee8c905afe4301;hpb=1d5ced917697f01857311f9bd21c6feba6a33e09;p=dpdk.git

diff --git a/lib/librte_pmd_virtio/virtio_rxtx.c b/lib/librte_pmd_virtio/virtio_rxtx.c
index adda2a8b71..3ff275cba7 100644
--- a/lib/librte_pmd_virtio/virtio_rxtx.c
+++ b/lib/librte_pmd_virtio/virtio_rxtx.c
@@ -129,17 +129,32 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
 	return i;
 }
 
+#ifndef DEFAULT_TX_FREE_THRESH
+#define DEFAULT_TX_FREE_THRESH 32
+#endif
+
+/* Cleanup from completed transmits. */
 static void
-virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
+virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
 {
-	struct vring_used_elem *uep;
-	uint16_t used_idx, desc_idx;
+	uint16_t i, used_idx, desc_idx;
 
+	for (i = 0; i < num; i++) {
+		struct vring_used_elem *uep;
+		struct vq_desc_extra *dxp;
+
+		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
+		uep = &vq->vq_ring.used->ring[used_idx];
 
-	used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-	uep = &vq->vq_ring.used->ring[used_idx];
-	desc_idx = (uint16_t) uep->id;
-	vq->vq_used_cons_idx++;
-	vq_ring_free_chain(vq, desc_idx);
+		desc_idx = (uint16_t) uep->id;
+		dxp = &vq->vq_descx[desc_idx];
+		vq->vq_used_cons_idx++;
+		vq_ring_free_chain(vq, desc_idx);
+
+		if (dxp->cookie != NULL) {
+			rte_pktmbuf_free(dxp->cookie);
+			dxp->cookie = NULL;
+		}
+	}
 }
 
@@ -203,8 +218,6 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
 
 	idx = head_idx;
 	dxp = &txvq->vq_descx[idx];
-	if (dxp->cookie != NULL)
-		rte_pktmbuf_free(dxp->cookie);
 	dxp->cookie = (void *)cookie;
 	dxp->ndescs = needed;
 
@@ -369,7 +382,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			__rte_unused const struct rte_eth_rxconf *rx_conf,
 			struct rte_mempool *mp)
 {
-	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
 	struct virtqueue *vq;
 	int ret;
 
@@ -404,6 +417,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 {
 	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
 	struct virtqueue *vq;
+	uint16_t tx_free_thresh;
 	int ret;
 
 	PMD_INIT_FUNC_TRACE();
@@ -421,6 +435,22 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 		return ret;
 	}
 
+	tx_free_thresh = tx_conf->tx_free_thresh;
+	if (tx_free_thresh == 0)
+		tx_free_thresh =
+			RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
+
+	if (tx_free_thresh >= (vq->vq_nentries - 3)) {
+		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
+			"number of TX entries minus 3 (%u)."
+ " (tx_free_thresh=%u port=%u queue=%u)\n", + vq->vq_nentries - 3, + tx_free_thresh, dev->data->port_id, queue_idx); + return -EINVAL; + } + + vq->vq_free_thresh = tx_free_thresh; + dev->data->tx_queues[queue_idx] = vq; return 0; } @@ -446,13 +476,13 @@ uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { struct virtqueue *rxvq = rx_queue; - struct virtio_hw *hw = rxvq->hw; + struct virtio_hw *hw; struct rte_mbuf *rxm, *new_mbuf; - uint16_t nb_used, num, nb_rx = 0; + uint16_t nb_used, num, nb_rx; uint32_t len[VIRTIO_MBUF_BURST_SZ]; struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; int error; - uint32_t i, nb_enqueued = 0; + uint32_t i, nb_enqueued; const uint32_t hdr_size = sizeof(struct virtio_net_hdr); nb_used = VIRTQUEUE_NUSED(rxvq); @@ -469,6 +499,11 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num); PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num); + + hw = rxvq->hw; + nb_rx = 0; + nb_enqueued = 0; + for (i = 0; i < num ; i++) { rxm = rcv_pkts[i]; @@ -537,16 +572,17 @@ virtio_recv_mergeable_pkts(void *rx_queue, uint16_t nb_pkts) { struct virtqueue *rxvq = rx_queue; + struct virtio_hw *hw; struct rte_mbuf *rxm, *new_mbuf; - uint16_t nb_used, num, nb_rx = 0; + uint16_t nb_used, num, nb_rx; uint32_t len[VIRTIO_MBUF_BURST_SZ]; struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ]; struct rte_mbuf *prev; int error; - uint32_t i = 0, nb_enqueued = 0; - uint32_t seg_num = 0; - uint16_t extra_idx = 0; - uint32_t seg_res = 0; + uint32_t i, nb_enqueued; + uint32_t seg_num; + uint16_t extra_idx; + uint32_t seg_res; const uint32_t hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); nb_used = VIRTQUEUE_NUSED(rxvq); @@ -558,6 +594,14 @@ virtio_recv_mergeable_pkts(void *rx_queue, PMD_RX_LOG(DEBUG, "used:%d\n", nb_used); + hw = rxvq->hw; + nb_rx = 0; + i = 0; + nb_enqueued = 0; + seg_num = 0; + extra_idx = 0; + seg_res = 0; + while (i < nb_used) { struct virtio_net_hdr_mrg_rxbuf *header; @@ -606,7 +650,7 @@ virtio_recv_mergeable_pkts(void *rx_queue, /* * Get extra segments for current uncompleted packet. */ - uint32_t rcv_cnt = + uint16_t rcv_cnt = RTE_MIN(seg_res, RTE_DIM(rcv_pkts)); if (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) { uint32_t rx_num = @@ -643,6 +687,9 @@ virtio_recv_mergeable_pkts(void *rx_queue, seg_res -= rcv_cnt; } + if (hw->vlan_strip) + rte_vlan_strip(rx_pkts[nb_rx]); + VIRTIO_DUMP_PACKET(rx_pkts[nb_rx], rx_pkts[nb_rx]->data_len); @@ -687,11 +734,9 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { struct virtqueue *txvq = tx_queue; struct rte_mbuf *txm; - uint16_t nb_used, nb_tx, num; + uint16_t nb_used, nb_tx; int error; - nb_tx = 0; - if (unlikely(nb_pkts < 1)) return nb_pkts; @@ -699,21 +744,26 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) nb_used = VIRTQUEUE_NUSED(txvq); virtio_rmb(); + if (likely(nb_used > txvq->vq_free_thresh)) + virtio_xmit_cleanup(txvq, nb_used); - num = (uint16_t)(likely(nb_used < VIRTIO_MBUF_BURST_SZ) ? nb_used : VIRTIO_MBUF_BURST_SZ); + nb_tx = 0; while (nb_tx < nb_pkts) { /* Need one more descriptor for virtio header. */ int need = tx_pkts[nb_tx]->nb_segs - txvq->vq_free_cnt + 1; - int deq_cnt = RTE_MIN(need, (int)num); - num -= (deq_cnt > 0) ? 
-		while (deq_cnt > 0) {
-			virtqueue_dequeue_pkt_tx(txvq);
-			deq_cnt--;
+		/*Positive value indicates it need free vring descriptors */
+		if (unlikely(need > 0)) {
+			nb_used = VIRTQUEUE_NUSED(txvq);
+			virtio_rmb();
+			need = RTE_MIN(need, (int)nb_used);
+
+			virtio_xmit_cleanup(txvq, need);
+			need = (int)tx_pkts[nb_tx]->nb_segs -
+				txvq->vq_free_cnt + 1;
 		}
 
-		need = (int)tx_pkts[nb_tx]->nb_segs - txvq->vq_free_cnt + 1;
 		/*
 		 * Zero or negative value indicates it has enough free
 		 * descriptors to use for transmitting.
@@ -722,7 +772,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 			txm = tx_pkts[nb_tx];
 
 			/* Do VLAN tag insertion */
-			if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+			if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
 				error = rte_vlan_insert(&txm);
 				if (unlikely(error)) {
 					rte_pktmbuf_free(txm);