net/virtio: add parameter to enable vectorized path
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 405c313..84f4cf9 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -635,7 +635,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
        struct vring_desc *start_dp;
        struct virtio_net_hdr *hdr;
        uint16_t idx;
-       uint16_t head_size = vq->hw->vtnet_hdr_size;
+       int16_t head_size = vq->hw->vtnet_hdr_size;
        uint16_t i = 0;
 
        idx = vq->vq_desc_head_idx;
@@ -648,8 +648,8 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
                dxp->ndescs = 1;
                virtio_update_packet_stats(&txvq->stats, cookies[i]);
 
-               hdr = (struct virtio_net_hdr *)(char *)cookies[i]->buf_addr +
-                       cookies[i]->data_off - head_size;
+               hdr = rte_pktmbuf_mtod_offset(cookies[i],
+                               struct virtio_net_hdr *, -head_size);
 
                /* if offload disabled, hdr is not zeroed yet, do it now */
                if (!vq->hw->has_tx_offload)
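
Every Tx path below gets the same two-part change: head_size becomes signed, and the header address is taken with rte_pktmbuf_mtod_offset() instead of open-coded casts. In the removed form the cast binds tighter than the additions, so data_off and head_size are added in units of sizeof(struct virtio_net_hdr) rather than bytes; the macro keeps all arithmetic byte-wise and casts once at the end. A minimal sketch of what the macro computes (the expansion shown here is an assumption based on the documented rte_mbuf layout):

    #include <stdint.h>

    /* Stand-in for struct rte_mbuf with only the fields the macro reads. */
    struct mbuf_sketch {
            void *buf_addr;    /* start of the data buffer */
            uint16_t data_off; /* offset of the packet data in that buffer */
    };

    /* rte_pktmbuf_mtod_offset(m, t, o) behaves like this helper: byte
     * arithmetic from buf_addr, then a single cast of the final address.
     * With off = -head_size the virtio header sits right before the packet
     * data, which is why a signed head_size reads more naturally here. */
    static inline void *
    mtod_offset(const struct mbuf_sketch *m, int32_t off)
    {
            return (char *)m->buf_addr + m->data_off + off;
    }
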
@@ -682,7 +682,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
        struct vring_packed_desc *dp;
        struct vq_desc_extra *dxp;
        uint16_t idx, id, flags;
-       uint16_t head_size = vq->hw->vtnet_hdr_size;
+       int16_t head_size = vq->hw->vtnet_hdr_size;
        struct virtio_net_hdr *hdr;
 
        id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
@@ -696,8 +696,8 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
        flags = vq->vq_packed.cached_flags;
 
        /* prepend cannot fail, checked by caller */
-       hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
-               cookie->data_off - head_size;
+       hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+                                     -head_size);
 
        /* if offload disabled, hdr is not zeroed yet, do it now */
        if (!vq->hw->has_tx_offload)
@@ -734,7 +734,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
        struct virtqueue *vq = txvq->vq;
        struct vring_packed_desc *start_dp, *head_dp;
        uint16_t idx, id, head_idx, head_flags;
-       uint16_t head_size = vq->hw->vtnet_hdr_size;
+       int16_t head_size = vq->hw->vtnet_hdr_size;
        struct virtio_net_hdr *hdr;
        uint16_t prev;
        bool prepend_header = false;
@@ -756,8 +756,8 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
        if (can_push) {
                /* prepend cannot fail, checked by caller */
-               hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
-                       cookie->data_off - head_size;
+               hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+                                             -head_size);
                prepend_header = true;
 
                /* if offload disabled, it is not zeroed below, do it now */
@@ -832,7 +832,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
        struct vring_desc *start_dp;
        uint16_t seg_num = cookie->nb_segs;
        uint16_t head_idx, idx;
-       uint16_t head_size = vq->hw->vtnet_hdr_size;
+       int16_t head_size = vq->hw->vtnet_hdr_size;
        bool prepend_header = false;
        struct virtio_net_hdr *hdr;
 
@@ -849,8 +849,8 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
        if (can_push) {
                /* prepend cannot fail, checked by caller */
-               hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
-                       cookie->data_off - head_size;
+               hdr = rte_pktmbuf_mtod_offset(cookie, struct virtio_net_hdr *,
+                                             -head_size);
                prepend_header = true;
 
                /* if offload disabled, it is not zeroed below, do it now */
@@ -936,6 +936,7 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
        struct virtio_hw *hw = dev->data->dev_private;
        struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
        struct virtnet_rx *rxvq;
+       uint16_t rx_free_thresh;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -944,6 +945,28 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       rx_free_thresh = rx_conf->rx_free_thresh;
+       if (rx_free_thresh == 0)
+               rx_free_thresh =
+                       RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
+
+       if (rx_free_thresh & 0x3) {
+               RTE_LOG(ERR, PMD, "rx_free_thresh must be a multiple of four."
+                       " (rx_free_thresh=%u port=%u queue=%u)\n",
+                       rx_free_thresh, dev->data->port_id, queue_idx);
+               return -EINVAL;
+       }
+
+       if (rx_free_thresh >= vq->vq_nentries) {
+               RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
+                       "number of RX entries (%u)."
+                       " (rx_free_thresh=%u port=%u queue=%u)\n",
+                       vq->vq_nentries,
+                       rx_free_thresh, dev->data->port_id, queue_idx);
+               return -EINVAL;
+       }
+       vq->vq_free_thresh = rx_free_thresh;
+
        if (nb_desc == 0 || nb_desc > vq->vq_nentries)
                nb_desc = vq->vq_nentries;
        vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
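
The added Rx checks pair with the vectorized path this series enables: a zero threshold falls back to RTE_MIN(vq_nentries / 4, DEFAULT_RX_FREE_THRESH), and an explicit value must be a multiple of four (presumably the granularity the vector refill works in) and smaller than the ring. From the application side the constraint looks like this; the calls are the standard ethdev API, the ring size and threshold are illustrative:

    #include <rte_ethdev.h>

    /* A 256-entry ring with rx_free_thresh = 32 passes both new checks
     * (32 & 0x3 == 0 and 32 < 256).  Leaving the field at 0 lets the
     * driver choose its own default as described above. */
    static int
    setup_virtio_rxq(uint16_t port_id, uint16_t queue_id,
                     struct rte_mempool *mp)
    {
            struct rte_eth_rxconf rxconf = { .rx_free_thresh = 32 };

            return rte_eth_rx_queue_setup(port_id, queue_id, 256,
                                          rte_eth_dev_socket_id(port_id),
                                          &rxconf, mp);
    }
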
@@ -966,13 +989,14 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
        struct rte_mbuf *m;
        uint16_t desc_idx;
        int error, nbufs, i;
+       bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
 
        PMD_INIT_FUNC_TRACE();
 
        /* Allocate blank mbufs for the each rx descriptor */
        nbufs = 0;
 
-       if (hw->use_simple_rx) {
+       if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
                for (desc_idx = 0; desc_idx < vq->vq_nentries;
                     desc_idx++) {
                        vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
@@ -990,12 +1014,12 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
                        &rxvq->fake_mbuf;
        }
 
-       if (hw->use_simple_rx) {
+       if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
                while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
                        virtio_rxq_rearm_vec(rxvq);
                        nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
                }
-       } else if (hw->use_inorder_rx) {
+       } else if (!vtpci_packed_queue(vq->hw) && in_order) {
                if ((!virtqueue_full(vq))) {
                        uint16_t free_cnt = vq->vq_free_cnt;
                        struct rte_mbuf *pkts[free_cnt];
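
Both refill branches now derive their condition from the queue layout and the negotiated features rather than the removed use_simple_rx/use_inorder_rx flags: the vector refill applies only to split rings, and the in-order bulk refill only when VIRTIO_F_IN_ORDER was negotiated. A condensed sketch of the resulting choice; the flag and helper names follow the diff, the enum is illustrative:

    #include <stdbool.h>

    enum rx_refill_path { RX_REFILL_VEC, RX_REFILL_INORDER, RX_REFILL_PLAIN };

    static enum rx_refill_path
    pick_rx_refill(bool vec_rx, bool packed, bool in_order)
    {
            if (vec_rx && !packed)
                    return RX_REFILL_VEC;     /* rearm in batches of
                                               * RTE_VIRTIO_VPMD_RX_REARM_THRESH */
            if (!packed && in_order)
                    return RX_REFILL_INORDER; /* bulk-allocate, refill in order */
            return RX_REFILL_PLAIN;           /* per-descriptor refill */
    }
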
@@ -1067,6 +1091,11 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
+       if (tx_conf->tx_deferred_start) {
+               PMD_INIT_LOG(ERR, "Tx deferred start is not supported");
+               return -EINVAL;
+       }
+
        if (nb_desc == 0 || nb_desc > vq->vq_nentries)
                nb_desc = vq->vq_nentries;
        vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
@@ -1080,7 +1109,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
 
        if (tx_free_thresh >= (vq->vq_nentries - 3)) {
-               RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
+               PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
                        "number of TX entries minus 3 (%u)."
                        " (tx_free_thresh=%u port=%u queue=%u)\n",
                        vq->vq_nentries - 3,
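
Tx queue setup is tightened in two ways: deferred start is refused up front, and tx_free_thresh must stay below the ring size minus three (the driver keeps a little descriptor headroom). An application-side sketch under the same assumptions as the Rx example above:

    #include <rte_ethdev.h>

    static int
    setup_virtio_txq(uint16_t port_id, uint16_t queue_id)
    {
            struct rte_eth_txconf txconf = {
                    .tx_free_thresh = 32,   /* must be < nb_desc - 3 */
                    .tx_deferred_start = 0, /* 1 now fails with -EINVAL */
            };

            return rte_eth_tx_queue_setup(port_id, queue_id, 256,
                                          rte_eth_dev_socket_id(port_id),
                                          &txconf);
    }
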
@@ -1105,7 +1134,7 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
        PMD_INIT_FUNC_TRACE();
 
        if (!vtpci_packed_queue(hw)) {
-               if (hw->use_inorder_tx)
+               if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER))
                        vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
        }
 
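
vtpci_with_feature() reads the feature bits actually negotiated with the device, so dropping the cached use_inorder_tx flag removes state that had to be kept in sync by hand. The helper is essentially a bit test; a sketch assuming the negotiated features live in a 64-bit mask, as the virtio spec defines them:

    #include <stdbool.h>
    #include <stdint.h>

    #define VIRTIO_F_IN_ORDER 35 /* feature bit number from the virtio spec */

    /* Sketch of vtpci_with_feature(); the real helper takes the driver's
     * struct virtio_hw and tests its guest_features mask. */
    static inline bool
    with_feature(uint64_t negotiated_features, unsigned int bit)
    {
            return (negotiated_features & (1ULL << bit)) != 0;
    }

When the bit is set, split-ring Tx setup chains the tail descriptor back to index 0, which is the .next = 0 assignment guarded above.
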
@@ -1128,7 +1157,7 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
                error = virtqueue_enqueue_recv_refill(vq, &m, 1);
 
        if (unlikely(error)) {
-               RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+               PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
                rte_pktmbuf_free(m);
        }
 }
@@ -1140,7 +1169,7 @@ virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
 
        error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
        if (unlikely(error)) {
-               RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
+               PMD_DRV_LOG(ERR, "cannot requeue discarded mbuf");
                rte_pktmbuf_free(m);
        }
 }
@@ -2018,7 +2047,7 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
        struct virtio_hw *hw = vq->hw;
        uint16_t hdr_size = hw->vtnet_hdr_size;
        uint16_t nb_tx = 0;
-       bool in_order = hw->use_inorder_tx;
+       bool in_order = vtpci_with_feature(hw, VIRTIO_F_IN_ORDER);
 
        if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
                return nb_tx;
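
The packed Tx burst makes the same switch, evaluating in_order from the negotiated features once per call. Together with use_vec_rx above, the datapath ends up selected from explicit device state, which is what lets this series expose it as a device argument (appending vectorized=1 to the port's devargs in the upstream series). A hypothetical sketch of the resulting dispatch; virtio_xmit_pkts, virtio_xmit_pkts_inorder and virtio_xmit_pkts_packed are real symbols from this file, but the selection logic shown is an assumption, not the patch's exact code:

    #include <stdbool.h>
    #include <rte_ethdev_driver.h> /* struct rte_eth_dev with burst hooks */

    /* Burst prototypes as declared by the driver. */
    uint16_t virtio_xmit_pkts(void *txq, struct rte_mbuf **pkts, uint16_t n);
    uint16_t virtio_xmit_pkts_inorder(void *txq, struct rte_mbuf **pkts,
                                      uint16_t n);
    uint16_t virtio_xmit_pkts_packed(void *txq, struct rte_mbuf **pkts,
                                     uint16_t n);

    static void
    select_tx_burst(struct rte_eth_dev *dev, bool packed, bool in_order)
    {
            if (packed)
                    dev->tx_pkt_burst = virtio_xmit_pkts_packed;
            else if (in_order)
                    dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
            else
                    dev->tx_pkt_burst = virtio_xmit_pkts;
    }
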