bonding: use existing enslaved device queues
[dpdk.git] / drivers / net / virtio / virtio_rxtx.c
index 98b3809..2b88efd 100644 (file)
@@ -208,25 +208,18 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
        return 0;
 }
 
-static int
+static inline void
 virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
-                      int use_indirect)
+                      uint16_t needed, int use_indirect, int can_push)
 {
        struct vq_desc_extra *dxp;
        struct vring_desc *start_dp;
        uint16_t seg_num = cookie->nb_segs;
-       uint16_t needed = use_indirect ? 1 : 1 + seg_num;
        uint16_t head_idx, idx;
+       uint16_t head_size = txvq->hw->vtnet_hdr_size;
        unsigned long offs;
 
-       if (unlikely(txvq->vq_free_cnt == 0))
-               return -ENOSPC;
-       if (unlikely(txvq->vq_free_cnt < needed))
-               return -EMSGSIZE;
        head_idx = txvq->vq_desc_head_idx;
-       if (unlikely(head_idx >= txvq->vq_nentries))
-               return -EFAULT;
-
        idx = head_idx;
        dxp = &txvq->vq_descx[idx];
        dxp->cookie = (void *)cookie;
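
With the -ENOSPC/-EMSGSIZE checks gone, virtqueue_enqueue_xmit() relies on the caller having already verified that enough free descriptors exist for the chosen layout (see the virtio_xmit_pkts() hunks further down). A minimal sketch of that per-packet accounting, using only the public mbuf type; the helper name is invented for illustration and is not part of the driver:

#include <rte_mbuf.h>

/* Hypothetical helper: main-ring descriptors consumed by one packet
 * under each Tx layout used in this patch. */
static inline uint16_t
tx_slots_needed(const struct rte_mbuf *m, int use_indirect, int can_push)
{
        if (use_indirect)
                return 1;              /* one slot points at an indirect table */
        if (can_push)
                return m->nb_segs;     /* header pushed into the first segment */
        return m->nb_segs + 1;         /* separate descriptor for the header */
}

virtio_xmit_pkts() computes the same value inline as "slots = use_indirect ? 1 : (txm->nb_segs + !can_push)" and only enqueues once vq_free_cnt covers it, which is why the error paths can be dropped here.
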
@@ -234,7 +227,12 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
 
        start_dp = txvq->vq_ring.desc;
 
-       if (use_indirect) {
+       if (can_push) {
+               /* prepend a zeroed transmit header (no offloads) */
+               void *hdr = rte_pktmbuf_prepend(cookie, head_size);
+
+               memset(hdr, 0, head_size);
+       } else if (use_indirect) {
                /* setup tx ring slot to point to indirect
                 * descriptor list stored in reserved region.
                 *
@@ -252,7 +250,7 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
 
                /* loop below will fill in rest of the indirect elements */
                start_dp = txr[idx].tx_indir;
-               idx = 0;
+               idx = 1;
        } else {
                /* setup first tx ring slot to point to header
                 * stored in reserved region.
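
The idx change from 0 to 1 in the indirect path works because entry 0 of each per-slot indirect table is pre-initialized when the Tx queue is set up: it already points at that slot's tx_hdr inside the reserved memzone, so the segment loop only needs to fill entries from index 1 onwards. A rough sketch of that one-time setup, assuming the virtio_tx_region layout implied by the offsetof() calls in this patch; the types and macros come from the driver's own virtqueue.h/virtio_ring.h, and the real initialization lives in the queue-start code, not here:

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: preset entry 0 of every indirect table to describe
 * the virtio-net header stored in front of it, and chain the remaining
 * entries so the enqueue loop can follow .next from entry 1. */
static void
tx_region_preset(struct virtio_tx_region *txr, uint64_t region_phys,
                 uint16_t nentries, uint16_t hdr_size)
{
        for (uint16_t i = 0; i < nentries; i++) {
                txr[i].tx_indir[0].addr  = region_phys + i * sizeof(*txr)
                        + offsetof(struct virtio_tx_region, tx_hdr);
                txr[i].tx_indir[0].len   = hdr_size;
                txr[i].tx_indir[0].flags = VRING_DESC_F_NEXT;

                for (uint16_t j = 0; j < VIRTIO_MAX_TX_INDIRECT - 1; j++)
                        txr[i].tx_indir[j].next = j + 1;
        }
}
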
@@ -263,30 +261,26 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie,
                start_dp[idx].addr  = txvq->virtio_net_hdr_mem + offs;
                start_dp[idx].len   = txvq->hw->vtnet_hdr_size;
                start_dp[idx].flags = VRING_DESC_F_NEXT;
+               idx = start_dp[idx].next;
        }
 
-       for (; ((seg_num > 0) && (cookie != NULL)); seg_num--) {
-               idx = start_dp[idx].next;
+       do {
                start_dp[idx].addr  = rte_mbuf_data_dma_addr(cookie);
                start_dp[idx].len   = cookie->data_len;
-               start_dp[idx].flags = VRING_DESC_F_NEXT;
-               cookie = cookie->next;
-       }
+               start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
+               idx = start_dp[idx].next;
+       } while ((cookie = cookie->next) != NULL);
 
        start_dp[idx].flags &= ~VRING_DESC_F_NEXT;
 
        if (use_indirect)
                idx = txvq->vq_ring.desc[head_idx].next;
-       else
-               idx = start_dp[idx].next;
 
        txvq->vq_desc_head_idx = idx;
        if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
                txvq->vq_desc_tail_idx = idx;
        txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
        vq_update_avail_ring(txvq, head_idx);
-
-       return 0;
 }
 
 static inline struct rte_mbuf *
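
The can_push branch is the core of the change: when the device accepts any descriptor layout, the all-zero (no-offload) virtio-net header is written into the mbuf's own headroom, so header and payload share descriptors and a single-segment packet needs exactly one ring entry. A standalone sketch of the technique using only public mbuf APIs; the function name and the explicit headroom check are additions for illustration (in the driver the caller has already verified the headroom):

#include <string.h>
#include <rte_mbuf.h>

/* Illustrative sketch: push a zeroed virtio-net header of hdr_size bytes
 * into the headroom of an exclusively owned, single-segment mbuf. */
static int
push_zeroed_virtio_hdr(struct rte_mbuf *m, uint16_t hdr_size)
{
        char *hdr;

        if (rte_pktmbuf_headroom(m) < hdr_size)
                return -1;                       /* no room in front of the data */

        hdr = rte_pktmbuf_prepend(m, hdr_size);  /* cannot fail after the check */
        memset(hdr, 0, hdr_size);                /* no checksum/TSO offloads */
        return 0;
}

After the prepend, data_len and pkt_len already include the header, so the do/while segment loop above transmits header and payload from the same buffer without a dedicated header descriptor.
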
@@ -619,9 +613,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        if (likely(num > DESC_PER_CACHELINE))
                num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
 
-       if (num == 0)
-               return 0;
-
        num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
        PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
 
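
Dropping the num == 0 early return is safe: virtqueue_dequeue_burst_rx() and the receive loop both handle an empty burst naturally. The cache-line trimming kept just above it deserves a note, though: the burst is shortened so that vq_used_cons_idx lands on a used-ring cache-line boundary afterwards, so successive polls tend to consume whole cache lines rather than repeatedly touching a partially read one. A small worked example of the arithmetic in plain C (the DESC_PER_CACHELINE value here is illustrative; the driver derives it from the cache-line size):

#include <stdint.h>
#include <stdio.h>

#define DESC_PER_CACHELINE 4    /* example value only */

int main(void)
{
        uint16_t used_cons_idx = 6;     /* current consumer position */
        uint16_t num = 7;               /* descriptors available to dequeue */

        if (num > DESC_PER_CACHELINE)
                num = num - ((used_cons_idx + num) % DESC_PER_CACHELINE);

        /* (6 + 7) % 4 == 1, so num becomes 6 and 6 + 6 = 12 is aligned */
        printf("dequeue %u descriptors\n", (unsigned)num);
        return 0;
}
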
@@ -719,9 +710,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 
        virtio_rmb();
 
-       if (nb_used == 0)
-               return 0;
-
        PMD_RX_LOG(DEBUG, "used:%d\n", nb_used);
 
        hw = rxvq->hw;
@@ -867,6 +855,8 @@ uint16_t
 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 {
        struct virtqueue *txvq = tx_queue;
+       struct virtio_hw *hw = txvq->hw;
+       uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_used, nb_tx;
        int error;
 
@@ -882,14 +872,35 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
        for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
                struct rte_mbuf *txm = tx_pkts[nb_tx];
-               int use_indirect, slots, need;
+               int can_push = 0, use_indirect = 0, slots, need;
 
-               use_indirect = vtpci_with_feature(txvq->hw,
-                                                 VIRTIO_RING_F_INDIRECT_DESC)
-                       && (txm->nb_segs < VIRTIO_MAX_TX_INDIRECT);
+               /* Do VLAN tag insertion */
+               if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
+                       error = rte_vlan_insert(&txm);
+                       if (unlikely(error)) {
+                               rte_pktmbuf_free(txm);
+                               continue;
+                       }
+               }
 
-               /* How many main ring entries are needed to this Tx? */
-               slots = use_indirect ? 1 : 1 + txm->nb_segs;
+               /* optimize ring usage */
+               if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) &&
+                   rte_mbuf_refcnt_read(txm) == 1 &&
+                   txm->nb_segs == 1 &&
+                   rte_pktmbuf_headroom(txm) >= hdr_size &&
+                   rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
+                                  __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
+                       can_push = 1;
+               else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
+                        txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
+                       use_indirect = 1;
+
+               /* How many main ring entries are needed for this Tx?
+                * any_layout => number of segments
+                * indirect   => 1
+                * default    => number of segments + 1
+                */
+               slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
                need = slots - txvq->vq_free_cnt;
 
                /* Positive value indicates it need free vring descriptors */
@@ -907,26 +918,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                        }
                }
 
-               /* Do VLAN tag insertion */
-               if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
-                       error = rte_vlan_insert(&txm);
-                       if (unlikely(error)) {
-                               rte_pktmbuf_free(txm);
-                               continue;
-                       }
-               }
-
                /* Enqueue Packet buffers */
-               error = virtqueue_enqueue_xmit(txvq, txm, use_indirect);
-               if (unlikely(error)) {
-                       if (error == ENOSPC)
-                               PMD_TX_LOG(ERR, "virtqueue_enqueue Free count = 0");
-                       else if (error == EMSGSIZE)
-                               PMD_TX_LOG(ERR, "virtqueue_enqueue Free count < 1");
-                       else
-                               PMD_TX_LOG(ERR, "virtqueue_enqueue error: %d", error);
-                       break;
-               }
+               virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push);
 
                txvq->bytes += txm->pkt_len;
                virtio_update_packet_stats(txvq, txm);
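
Taken together, the transmit path now chooses the cheapest layout per packet: push the header into the mbuf when VIRTIO_F_ANY_LAYOUT allows it, fall back to an indirect table when the device supports it and the chain fits, and otherwise spend one extra descriptor on the header. The conditions guarding can_push matter: the mbuf must be exclusively owned (refcnt 1) and single-segment, have hdr_size bytes of headroom, and leave the header suitably aligned. A condensed, illustrative predicate with the negotiated features passed in as plain flags (the driver reads them via vtpci_with_feature()):

#include <stdbool.h>
#include <rte_mbuf.h>
#include <rte_common.h>

/* Illustrative only: mirrors the per-packet layout decision above.
 * any_layout/indirect stand in for the negotiated VIRTIO features and
 * hdr_align for __alignof__ of the virtio-net header structure. */
static void
choose_tx_layout(const struct rte_mbuf *m, uint16_t hdr_size, size_t hdr_align,
                 bool any_layout, bool indirect, uint16_t max_indirect,
                 int *can_push, int *use_indirect)
{
        *can_push = 0;
        *use_indirect = 0;

        if (any_layout &&
            rte_mbuf_refcnt_read(m) == 1 &&          /* safe to rewrite headroom */
            m->nb_segs == 1 &&                       /* header + data in one buffer */
            rte_pktmbuf_headroom(m) >= hdr_size &&   /* room to prepend the header */
            rte_is_aligned(rte_pktmbuf_mtod(m, char *), hdr_align))
                *can_push = 1;
        else if (indirect && m->nb_segs < max_indirect)
                *use_indirect = 1;
}

A packet that qualifies for can_push then needs only nb_segs ring slots (one, in the common single-segment case), versus nb_segs + 1 in the default layout.
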