net/virtio: allocate queue at init stage
[dpdk.git] drivers/net/virtio/virtio_rxtx.c
index 0fa635a..6e7ff27 100644
@@ -209,10 +209,53 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
        return 0;
 }
 
+/* When doing TSO, the pseudo header checksum of the packet given to
+ * the PMD excludes the IP payload length, but virtio expects it to
+ * be included.
+ */
+static void
+virtio_tso_fix_cksum(struct rte_mbuf *m)
+{
+       /* common case: header is not fragmented */
+       if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
+                       m->l4_len)) {
+               struct ipv4_hdr *iph;
+               struct ipv6_hdr *ip6h;
+               struct tcp_hdr *th;
+               uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
+               uint32_t tmp;
+
+               iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+               th = RTE_PTR_ADD(iph, m->l3_len);
+               if ((iph->version_ihl >> 4) == 4) {
+                       iph->hdr_checksum = 0;
+                       iph->hdr_checksum = rte_ipv4_cksum(iph);
+                       ip_len = iph->total_length;
+                       ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
+                               m->l3_len);
+               } else {
+                       ip6h = (struct ipv6_hdr *)iph;
+                       ip_paylen = ip6h->payload_len;
+               }
+
+               /* fold ip_paylen into the phdr checksum (one's complement add) */
+               prev_cksum = th->cksum;
+               tmp = prev_cksum;
+               tmp += ip_paylen;
+               tmp = (tmp & 0xffff) + (tmp >> 16);
+               new_cksum = tmp;
+
+               /* replace it in the packet */
+               th->cksum = new_cksum;
+       }
+}
+
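The arithmetic above is ordinary one's complement checksum math: ip_paylen (still in network byte order, like th->cksum) is added to the existing pseudo header checksum and the carry is folded back into the low 16 bits. A standalone sketch of that update, not part of the patch, with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 16-bit value into an existing one's complement checksum,
     * as virtio_tso_fix_cksum() does with ip_paylen. Both operands
     * must be in the same byte order; with only two 16-bit addends a
     * single carry fold is enough (the sum is at most 0x1fffe).
     */
    static uint16_t
    cksum_add(uint16_t cksum, uint16_t value)
    {
            uint32_t tmp = (uint32_t)cksum + value;

            return (uint16_t)((tmp & 0xffff) + (tmp >> 16));
    }

    int
    main(void)
    {
            /* 0xfff0 + 0x0020 = 0x10010, folded to 0x0011 */
            printf("0x%04x\n", cksum_add(0xfff0, 0x0020));
            return 0;
    }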
 static inline int
 tx_offload_enabled(struct virtio_hw *hw)
 {
-       return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM);
+       return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
+               vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
+               vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
 }
 
 static inline void
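tx_offload_enabled() now also reports true when only TSO was negotiated. For context, vtpci_with_feature() is a plain bit test on the negotiated feature set; a simplified sketch (the struct and helper names here are illustrative, the real helper lives in the PMD's virtio_pci.h):

    #include <stdint.h>

    /* Sketch only: negotiated-feature bit test in the style of
     * vtpci_with_feature(); guest_features holds the feature bits
     * accepted during negotiation with the host.
     */
    struct hw_sketch {
            uint64_t guest_features;
    };

    static inline int
    with_feature(const struct hw_sketch *hw, uint64_t bit)
    {
            return (hw->guest_features & (1ULL << bit)) != 0;
    }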
@@ -274,8 +317,11 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                idx = start_dp[idx].next;
        }
 
+       /* Checksum Offload / TSO */
        if (offload) {
-               /* Checksum Offload */
+               if (cookie->ol_flags & PKT_TX_TCP_SEG)
+                       cookie->ol_flags |= PKT_TX_TCP_CKSUM;
+
                switch (cookie->ol_flags & PKT_TX_L4_MASK) {
                case PKT_TX_UDP_CKSUM:
                        hdr->csum_start = cookie->l2_len + cookie->l3_len;
@@ -297,9 +343,22 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                        break;
                }
 
-               hdr->gso_type = 0;
-               hdr->gso_size = 0;
-               hdr->hdr_len = 0;
+               /* TCP Segmentation Offload */
+               if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+                       virtio_tso_fix_cksum(cookie);
+                       hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+                               VIRTIO_NET_HDR_GSO_TCPV6 :
+                               VIRTIO_NET_HDR_GSO_TCPV4;
+                       hdr->gso_size = cookie->tso_segsz;
+                       hdr->hdr_len =
+                               cookie->l2_len +
+                               cookie->l3_len +
+                               cookie->l4_len;
+               } else {
+                       hdr->gso_type = 0;
+                       hdr->gso_size = 0;
+                       hdr->hdr_len = 0;
+               }
        }
 
        do {
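Put together, for a TCPv4 TSO mbuf with a 14-byte Ethernet header, 20-byte IPv4 header, 20-byte TCP header and a 1448-byte MSS, the offload block above fills the virtio net header as sketched below (simplified struct laid out per the virtio spec; the values are illustrative):

    #include <stdint.h>

    #define NET_HDR_F_NEEDS_CSUM 1 /* VIRTIO_NET_HDR_F_NEEDS_CSUM */
    #define NET_HDR_GSO_TCPV4    1 /* VIRTIO_NET_HDR_GSO_TCPV4 */

    struct net_hdr_sketch {
            uint8_t  flags;
            uint8_t  gso_type;
            uint16_t hdr_len;
            uint16_t gso_size;
            uint16_t csum_start;
            uint16_t csum_offset;
    };

    static const struct net_hdr_sketch tso_tcpv4_example = {
            .flags       = NET_HDR_F_NEEDS_CSUM, /* from the csum switch */
            .gso_type    = NET_HDR_GSO_TCPV4,
            .hdr_len     = 14 + 20 + 20, /* l2_len + l3_len + l4_len */
            .gso_size    = 1448,         /* tso_segsz, payload per segment */
            .csum_start  = 14 + 20,      /* l2_len + l3_len */
            .csum_offset = 16,           /* offsetof(struct tcp_hdr, cksum) */
    };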
@@ -471,24 +530,24 @@ int
 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
                        uint16_t nb_desc,
-                       unsigned int socket_id,
+                       unsigned int socket_id __rte_unused,
                        __rte_unused const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp)
 {
        uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
        struct virtnet_rx *rxvq;
-       int ret;
 
        PMD_INIT_FUNC_TRACE();
-       ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx,
-                       nb_desc, socket_id, (void **)&rxvq);
-       if (ret < 0) {
-               PMD_INIT_LOG(ERR, "rvq initialization failed");
-               return ret;
-       }
 
-       /* Create mempool for rx mbuf allocation */
+       if (nb_desc == 0 || nb_desc > vq->vq_nentries)
+               nb_desc = vq->vq_nentries;
+       vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+
+       rxvq = &vq->rxq;
        rxvq->mpool = mp;
+       rxvq->queue_id = queue_idx;
 
        dev->data->rx_queues[queue_idx] = rxvq;
 
@@ -497,27 +556,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
        return 0;
 }
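With the virtqueues already allocated at init stage, setup only has to look up hw->vqs[] using the interleaved vtpci layout, in which each ethdev queue index maps to an RX/TX virtqueue pair. A runnable sketch of that mapping, assuming the PMD's usual constants VTNET_SQ_RQ_QUEUE_IDX == 0 and VTNET_SQ_TQ_QUEUE_IDX == 1:

    #include <stdio.h>

    #define VTNET_SQ_RQ_QUEUE_IDX 0 /* assumed values, per virtio's  */
    #define VTNET_SQ_TQ_QUEUE_IDX 1 /* receiveq/transmitq ordering   */

    int
    main(void)
    {
            unsigned int q;

            /* ethdev queue 0 -> vqs[0]/vqs[1], queue 1 -> vqs[2]/vqs[3], ... */
            for (q = 0; q < 2; q++)
                    printf("queue %u: rx vq %u, tx vq %u\n", q,
                           2 * q + VTNET_SQ_RQ_QUEUE_IDX,
                           2 * q + VTNET_SQ_TQ_QUEUE_IDX);
            return 0;
    }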
 
-void
-virtio_dev_rx_queue_release(void *rxq)
-{
-       struct virtnet_rx *rxvq = rxq;
-       struct virtqueue *vq;
-       const struct rte_memzone *mz;
-
-       if (rxvq == NULL)
-               return;
-
-       /*
-        * rxvq is freed when vq is freed, and as mz should be freed after the
-        * del_queue, so we reserve the mz pointer first.
-        */
-       vq = rxvq->vq;
-       mz = rxvq->mz;
-
-       virtio_dev_queue_release(vq);
-       rte_memzone_free(mz);
-}
-
 static void
 virtio_update_rxtx_handler(struct rte_eth_dev *dev,
                           const struct rte_eth_txconf *tx_conf)
@@ -554,27 +592,25 @@ int
 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
                        uint16_t nb_desc,
-                       unsigned int socket_id,
+                       unsigned int socket_id __rte_unused,
                        const struct rte_eth_txconf *tx_conf)
 {
        uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+       struct virtio_hw *hw = dev->data->dev_private;
+       struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
        struct virtnet_tx *txvq;
-       struct virtqueue *vq;
        uint16_t tx_free_thresh;
-       int ret;
 
        PMD_INIT_FUNC_TRACE();
 
-
        virtio_update_rxtx_handler(dev, tx_conf);
 
-       ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx,
-                       nb_desc, socket_id, (void **)&txvq);
-       if (ret < 0) {
-               PMD_INIT_LOG(ERR, "tvq initialization failed");
-               return ret;
-       }
-       vq = txvq->vq;
+       if (nb_desc == 0 || nb_desc > vq->vq_nentries)
+               nb_desc = vq->vq_nentries;
+       vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
+
+       txvq = &vq->txq;
+       txvq->queue_id = queue_idx;
 
        tx_free_thresh = tx_conf->tx_free_thresh;
        if (tx_free_thresh == 0)
@@ -596,30 +632,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
        return 0;
 }
 
-void
-virtio_dev_tx_queue_release(void *txq)
-{
-       struct virtnet_tx *txvq = txq;
-       struct virtqueue *vq;
-       const struct rte_memzone *mz;
-       const struct rte_memzone *hdr_mz;
-
-       if (txvq == NULL)
-               return;
-
-       /*
-        * txvq is freed when vq is freed, and as mz should be freed after the
-        * del_queue, so we reserve the mz pointer first.
-        */
-       vq = txvq->vq;
-       mz = txvq->mz;
-       hdr_mz = txvq->virtio_net_hdr_mz;
-
-       virtio_dev_queue_release(vq);
-       rte_memzone_free(mz);
-       rte_memzone_free(hdr_mz);
-}
-
 static void
 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
 {
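From the application side the setup calls are unchanged; the practical differences after this patch are that socket_id is ignored (the queues were already allocated at init stage) and nb_desc == 0 now selects the full ring. A hedged usage sketch against the ethdev API of this era (setup_port() and its values are hypothetical):

    #include <rte_ethdev.h>

    static int
    setup_port(uint8_t port_id, struct rte_mempool *mp)
    {
            struct rte_eth_conf conf = { 0 };
            int ret;

            ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
            if (ret < 0)
                    return ret;

            /* nb_desc == 0: let the PMD use the whole virtqueue ring */
            ret = rte_eth_rx_queue_setup(port_id, 0, 0, SOCKET_ID_ANY,
                                         NULL, mp);
            if (ret < 0)
                    return ret;

            return rte_eth_tx_queue_setup(port_id, 0, 0, SOCKET_ID_ANY, NULL);
    }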