net: add rte prefix to ether structures
[dpdk.git] / drivers/net/vmxnet3/vmxnet3_rxtx.c
index 6649c3f..b691141 100644
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
  */
 
 #include <sys/queue.h>
@@ -61,7 +32,7 @@
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_prefetch.h>
 #include <rte_ip.h>
 #include <rte_udp.h>
@@ -79,6 +50,8 @@
 
 #define        VMXNET3_TX_OFFLOAD_MASK ( \
                PKT_TX_VLAN_PKT | \
+               PKT_TX_IPV6 |     \
+               PKT_TX_IPV4 |     \
                PKT_TX_L4_MASK |  \
                PKT_TX_TCP_SEG)
 
@@ -201,6 +174,10 @@ vmxnet3_dev_tx_queue_release(void *txq)
                vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
                /* Release the cmd_ring */
                vmxnet3_cmd_ring_release(&tq->cmd_ring);
+               /* Release the memzone */
+               rte_memzone_free(tq->mz);
+               /* Release the queue */
+               rte_free(tq);
        }
 }
 
@@ -218,6 +195,12 @@ vmxnet3_dev_rx_queue_release(void *rxq)
                /* Release both the cmd_rings */
                for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
                        vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
+
+               /* Release the memzone */
+               rte_memzone_free(rq->mz);
+
+               /* Release the queue */
+               rte_free(rq);
        }
 }
 
@@ -260,11 +243,9 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
        struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
        int size;
 
-       if (rq != NULL) {
-               /* Release both the cmd_rings mbufs */
-               for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
-                       vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
-       }
+       /* Release both the cmd_rings mbufs */
+       for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+               vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
 
        ring0 = &rq->cmd_ring[0];
        ring1 = &rq->cmd_ring[1];
@@ -478,6 +459,14 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                    rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
                        struct Vmxnet3_TxDataDesc *tdd;
 
+                       /* Skip empty packets */
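+			/* a zero-length packet would otherwise be queued
+			 * to the device as an empty Tx descriptor */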
+                       if (unlikely(rte_pktmbuf_pkt_len(txm) == 0)) {
+                               txq->stats.drop_total++;
+                               rte_pktmbuf_free(txm);
+                               nb_tx++;
+                               continue;
+                       }
+
                        tdd = (struct Vmxnet3_TxDataDesc *)
                                ((uint8 *)txq->data_ring.base +
                                 txq->cmd_ring.next2fill *
@@ -498,14 +487,20 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         * maximum size of mbuf segment size.
                         */
                        gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
+
+                       /* Skip empty segments */
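+			/* next2fill is not advanced for a skipped segment,
+			 * so its descriptor is reused by the next one */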
+                       if (unlikely(m_seg->data_len == 0))
+                               continue;
+
                        if (copy_size) {
-                               uint64 offset = txq->cmd_ring.next2fill *
-                                               txq->txdata_desc_size;
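+				/* multiply in 64 bits so a large ring
+				 * cannot overflow the data ring offset */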
+                               uint64 offset =
+                                       (uint64)txq->cmd_ring.next2fill *
+                                                       txq->txdata_desc_size;
                                gdesc->txd.addr =
                                        rte_cpu_to_le_64(txq->data_ring.basePA +
                                                         offset);
                        } else {
-                               gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
+                               gdesc->txd.addr = rte_mbuf_data_iova(m_seg);
                        }
 
                        gdesc->dword[2] = dw2 | m_seg->data_len;
@@ -588,24 +583,40 @@ static inline void
 vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
                   struct rte_mbuf *mbuf)
 {
-       uint32_t val = 0;
+       uint32_t val;
        struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
        struct Vmxnet3_RxDesc *rxd =
                (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
        vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
 
-       if (ring_id == 0)
+       if (ring_id == 0) {
+               /* Usually: One HEAD type buf per packet
+                * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
+                * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
+                */
+
+               /* We use single packet buffer so all heads here */
                val = VMXNET3_RXD_BTYPE_HEAD;
-       else
+       } else {
+               /* All BODY type buffers for 2nd ring */
                val = VMXNET3_RXD_BTYPE_BODY;
+       }
 
+	/*
+	 * Load the mbuf pointer into buf_info[next2fill];
+	 * buf_info plays the same role as the cookie in a virtio virtqueue
+	 */
        buf_info->m = mbuf;
        buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
-       buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+       buf_info->bufPA = rte_mbuf_data_iova_default(mbuf);
 
+       /* Load Rx Descriptor with the buffer's GPA */
        rxd->addr = buf_info->bufPA;
+
+       /* After this point rxd->addr MUST not be NULL */
        rxd->btype = val;
        rxd->len = buf_info->len;
+       /* Flip gen bit at the end to change ownership */
        rxd->gen = ring->gen;
 
        vmxnet3_cmd_ring_adv_next2fill(ring);
@@ -624,28 +635,11 @@ static int
 vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
 {
        int err = 0;
-       uint32_t i = 0, val = 0;
+       uint32_t i = 0;
        struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
 
-       if (ring_id == 0) {
-               /* Usually: One HEAD type buf per packet
-                * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
-                * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
-                */
-
-               /* We use single packet buffer so all heads here */
-               val = VMXNET3_RXD_BTYPE_HEAD;
-       } else {
-               /* All BODY type buffers for 2nd ring */
-               val = VMXNET3_RXD_BTYPE_BODY;
-       }
-
        while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
-               struct Vmxnet3_RxDesc *rxd;
                struct rte_mbuf *mbuf;
-               vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
-
-               rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
 
                /* Allocate blank mbuf for the current Rx Descriptor */
                mbuf = rte_mbuf_raw_alloc(rxq->mp);
@@ -656,25 +650,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
                        break;
                }
 
-               /*
-                * Load mbuf pointer into buf_info[ring_size]
-                * buf_info structure is equivalent to cookie for virtio-virtqueue
-                */
-               buf_info->m = mbuf;
-               buf_info->len = (uint16_t)(mbuf->buf_len -
-                                          RTE_PKTMBUF_HEADROOM);
-               buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
-
-               /* Load Rx Descriptor with the buffer's GPA */
-               rxd->addr = buf_info->bufPA;
-
-               /* After this point rxd->addr MUST not be NULL */
-               rxd->btype = val;
-               rxd->len = buf_info->len;
-               /* Flip gen bit at the end to change ownership */
-               rxd->gen = ring->gen;
-
-               vmxnet3_cmd_ring_adv_next2fill(ring);
+               vmxnet3_renew_desc(rxq, ring_id, mbuf);
                i++;
        }
 
@@ -685,35 +661,154 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
                return i;
 }
 
-
-/* Receive side checksum and other offloads */
-static void
-vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
+/* MSS is not provided by vmxnet3; estimate it from the packet headers */
+static uint16_t
+vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+               struct rte_mbuf *rxm)
 {
-       /* Check for RSS */
-       if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
-               rxm->ol_flags |= PKT_RX_RSS_HASH;
-               rxm->hash.rss = rcd->rssHash;
-       }
+       uint32_t hlen, slen;
+       struct ipv4_hdr *ipv4_hdr;
+       struct ipv6_hdr *ipv6_hdr;
+       struct tcp_hdr *tcp_hdr;
+       char *ptr;
+
+       RTE_ASSERT(rcd->tcp);
+
+       ptr = rte_pktmbuf_mtod(rxm, char *);
+       slen = rte_pktmbuf_data_len(rxm);
+       hlen = sizeof(struct rte_ether_hdr);
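+	/* walk the Ethernet, IP and TCP headers, accumulating hlen */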
 
-       /* Check packet type, checksum errors, etc. Only support IPv4 for now. */
        if (rcd->v4) {
-               struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
-               struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
+               if (unlikely(slen < hlen + sizeof(struct ipv4_hdr)))
+                       return hw->mtu - sizeof(struct ipv4_hdr)
+                                       - sizeof(struct tcp_hdr);
+
+               ipv4_hdr = (struct ipv4_hdr *)(ptr + hlen);
+               hlen += (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
+                               IPV4_IHL_MULTIPLIER;
+       } else if (rcd->v6) {
+               if (unlikely(slen < hlen + sizeof(struct ipv6_hdr)))
+                       return hw->mtu - sizeof(struct ipv6_hdr) -
+                                       sizeof(struct tcp_hdr);
+
+               ipv6_hdr = (struct ipv6_hdr *)(ptr + hlen);
+               hlen += sizeof(struct ipv6_hdr);
+               if (unlikely(ipv6_hdr->proto != IPPROTO_TCP)) {
+                       int frag;
+
+                       rte_net_skip_ip6_ext(ipv6_hdr->proto, rxm,
+                                       &hlen, &frag);
+               }
+       }
 
-               if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
-                       rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
-               else
-                       rxm->packet_type = RTE_PTYPE_L3_IPV4;
+       if (unlikely(slen < hlen + sizeof(struct tcp_hdr)))
+               return hw->mtu - hlen - sizeof(struct tcp_hdr) +
+                               sizeof(struct rte_ether_hdr);
 
-               if (!rcd->cnc) {
-                       if (!rcd->ipc)
-                               rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
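+	/* the upper nibble of data_off is the TCP header length in 32-bit
+	 * words, so (data_off & 0xf0) >> 2 yields bytes directly
+	 */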
+       tcp_hdr = (struct tcp_hdr *)(ptr + hlen);
+       hlen += (tcp_hdr->data_off & 0xf0) >> 2;
 
-                       if ((rcd->tcp || rcd->udp) && !rcd->tuc)
-                               rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
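+	/* segCnt from the LRO completion was stashed in udata64: average
+	 * the payload over the coalesced segments, rounding up
+	 */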
+       if (rxm->udata64 > 1)
+               return (rte_pktmbuf_pkt_len(rxm) - hlen +
+                               rxm->udata64 - 1) / rxm->udata64;
+       else
+               return hw->mtu - hlen + sizeof(struct rte_ether_hdr);
+}
+
+/* Receive side checksum and other offloads */
+static inline void
+vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+               struct rte_mbuf *rxm, const uint8_t sop)
+{
+       uint64_t ol_flags = rxm->ol_flags;
+       uint32_t packet_type = rxm->packet_type;
+
+       /* Offloads set in sop */
+       if (sop) {
+               /* Set packet type */
+               packet_type |= RTE_PTYPE_L2_ETHER;
+
+               /* Check large packet receive */
+               if (VMXNET3_VERSION_GE_2(hw) &&
+                   rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
+                       const Vmxnet3_RxCompDescExt *rcde =
+                                       (const Vmxnet3_RxCompDescExt *)rcd;
+
+                       rxm->tso_segsz = rcde->mss;
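+			/* stash segCnt for vmxnet3_guess_mss() in case mss is 0 */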
+                       rxm->udata64 = rcde->segCnt;
+                       ol_flags |= PKT_RX_LRO;
+               }
+       } else { /* Offloads set in eop */
+               /* Check for RSS */
+               if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
+                       ol_flags |= PKT_RX_RSS_HASH;
+                       rxm->hash.rss = rcd->rssHash;
+               }
+
+               /* Check for hardware stripped VLAN tag */
+               if (rcd->ts) {
+                       ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+                       rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+               }
+
+               /* Check packet type, checksum errors, etc. */
+               if (rcd->cnc) {
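+			/* checksum not calculated by the device */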
+                       ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+               } else {
+                       if (rcd->v4) {
+                               packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+                               if (rcd->ipc)
+                                       ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+                               else
+                                       ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+                               if (rcd->tuc) {
+                                       ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+                                       if (rcd->tcp)
+                                               packet_type |= RTE_PTYPE_L4_TCP;
+                                       else
+                                               packet_type |= RTE_PTYPE_L4_UDP;
+                               } else {
+                                       if (rcd->tcp) {
+                                               packet_type |= RTE_PTYPE_L4_TCP;
+                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                       } else if (rcd->udp) {
+                                               packet_type |= RTE_PTYPE_L4_UDP;
+                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                       }
+                               }
+                       } else if (rcd->v6) {
+                               packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+                               if (rcd->tuc) {
+                                       ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+                                       if (rcd->tcp)
+                                               packet_type |= RTE_PTYPE_L4_TCP;
+                                       else
+                                               packet_type |= RTE_PTYPE_L4_UDP;
+                               } else {
+                                       if (rcd->tcp) {
+                                               packet_type |= RTE_PTYPE_L4_TCP;
+                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                       } else if (rcd->udp) {
+                                               packet_type |= RTE_PTYPE_L4_UDP;
+                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                       }
+                               }
+                       } else {
+                               packet_type |= RTE_PTYPE_UNKNOWN;
+                       }
+
+                       /* Old variants of vmxnet3 do not provide MSS */
+                       if ((ol_flags & PKT_RX_LRO) && rxm->tso_segsz == 0)
+                               rxm->tso_segsz = vmxnet3_guess_mss(hw,
+                                               rcd, rxm);
                }
        }
+
+       rxm->ol_flags = ol_flags;
+       rxm->packet_type = packet_type;
 }
 
 /*
@@ -795,6 +890,12 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                                   (int)(rcd - (struct Vmxnet3_RxCompDesc *)
                                         rxq->comp_ring.base), rcd->rxdIdx);
                        rte_pktmbuf_free_seg(rxm);
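+			/* an error mid-chain drops the segments already collected */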
+                       if (rxq->start_seg) {
+                               struct rte_mbuf *start = rxq->start_seg;
+
+                               rxq->start_seg = NULL;
+                               rte_pktmbuf_free(start);
+                       }
                        goto rcd_done;
                }
 
@@ -807,6 +908,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rxm->ol_flags = 0;
                rxm->vlan_tci = 0;
+               rxm->packet_type = 0;
 
                /*
                 * If this is the first buffer of the received packet,
@@ -838,28 +940,28 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        }
 
                        rxq->start_seg = rxm;
-                       vmxnet3_rx_offload(rcd, rxm);
+                       rxq->last_seg = rxm;
+                       vmxnet3_rx_offload(hw, rcd, rxm, 1);
                } else {
                        struct rte_mbuf *start = rxq->start_seg;
 
                        RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
 
-                       start->pkt_len += rxm->data_len;
-                       start->nb_segs++;
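+			/* chain only non-empty segments; free zero-length ones */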
+                       if (rxm->data_len) {
+                               start->pkt_len += rxm->data_len;
+                               start->nb_segs++;
 
-                       rxq->last_seg->next = rxm;
+                               rxq->last_seg->next = rxm;
+                               rxq->last_seg = rxm;
+                       } else {
+                               rte_pktmbuf_free_seg(rxm);
+                       }
                }
-               rxq->last_seg = rxm;
 
                if (rcd->eop) {
                        struct rte_mbuf *start = rxq->start_seg;
 
-                       /* Check for hardware stripped VLAN tag */
-                       if (rcd->ts) {
-                               start->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
-                               start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
-                       }
-
+                       vmxnet3_rx_offload(hw, rcd, start, 0);
                        rx_pkts[nb_rx++] = start;
                        rxq->start_seg = NULL;
                }
@@ -888,31 +990,24 @@ rcd_done:
                }
        }
 
-       return nb_rx;
-}
-
-/*
- * Create memzone for device rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
- */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
-                     uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
-       char z_name[RTE_MEMZONE_NAMESIZE];
-       const struct rte_memzone *mz;
-
-       snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                dev->driver->pci_drv.driver.name, ring_name,
-                dev->data->port_id, queue_id);
-
-       mz = rte_memzone_lookup(z_name);
-       if (mz)
-               return mz;
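+	/* nothing was completed: refill the rings and notify the device
+	 * so reception cannot stall with no buffers posted
+	 */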
+       if (unlikely(nb_rxd == 0)) {
+               uint32_t avail;
+               for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
+                       avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[ring_idx]);
+                       if (unlikely(avail > 0)) {
+                               /* try to alloc new buf and renew descriptors */
+                               vmxnet3_post_rx_bufs(rxq, ring_idx);
+                       }
+               }
+               if (unlikely(rxq->shared->ctrl.updateRxProd)) {
+                       for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
+                               VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
+                                                      rxq->cmd_ring[ring_idx].next2fill);
+                       }
+               }
+       }
 
-       return rte_memzone_reserve_aligned(z_name, ring_size,
-                                          socket_id, 0, VMXNET3_RING_BA_ALIGN);
+       return nb_rx;
 }
 
 int
@@ -920,7 +1015,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
-                          __rte_unused const struct rte_eth_txconf *tx_conf)
+                          const struct rte_eth_txconf *tx_conf __rte_unused)
 {
        struct vmxnet3_hw *hw = dev->data->dev_private;
        const struct rte_memzone *mz;
@@ -932,12 +1027,6 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
-       if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
-           ETH_TXQ_FLAGS_NOXSUMSCTP) {
-               PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
-               return -EINVAL;
-       }
-
        txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL) {
@@ -947,7 +1036,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        txq->queue_id = queue_idx;
        txq->port_id = dev->data->port_id;
-       txq->shared = &hw->tqd_start[queue_idx];
+       txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
        txq->hw = hw;
        txq->qid = queue_idx;
        txq->stopped = TRUE;
@@ -983,16 +1072,18 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
        size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
        size += txq->txdata_desc_size * data_ring->size;
 
-       mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
+       mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
+                                     VMXNET3_RING_BA_ALIGN, socket_id);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
                return -ENOMEM;
        }
+       txq->mz = mz;
        memset(mz->addr, 0, mz->len);
 
        /* cmd_ring initialization */
        ring->base = mz->addr;
-       ring->basePA = mz->phys_addr;
+       ring->basePA = mz->iova;
 
        /* comp_ring initialization */
        comp_ring->base = ring->base + ring->size;
@@ -1048,7 +1139,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->mp = mp;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;
-       rxq->shared = &hw->rqd_start[queue_idx];
+       rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
        rxq->hw = hw;
        rxq->qid1 = queue_idx;
        rxq->qid2 = queue_idx + hw->num_rx_queues;
@@ -1092,16 +1183,18 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
        if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size)
                size += rxq->data_desc_size * data_ring->size;
 
-       mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
+       mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
+                                     VMXNET3_RING_BA_ALIGN, socket_id);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
                return -ENOMEM;
        }
+       rxq->mz = mz;
        memset(mz->addr, 0, mz->len);
 
        /* cmd_ring0 initialization */
        ring0->base = mz->addr;
-       ring0->basePA = mz->phys_addr;
+       ring0->basePA = mz->iova;
 
        /* cmd_ring1 initialization */
        ring1->base = ring0->base + ring0->size;
@@ -1198,6 +1291,46 @@ static uint8_t rss_intel_key[40] = {
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
 };
 
+/*
+ * Additional RSS configuration based on vmxnet3 v4+ APIs
+ */
+int
+vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
+{
+       struct vmxnet3_hw *hw = dev->data->dev_private;
+       Vmxnet3_DriverShared *shared = hw->shared;
+       Vmxnet3_CmdInfo *cmdInfo = &shared->cu.cmdInfo;
+       struct rte_eth_rss_conf *port_rss_conf;
+       uint64_t rss_hf;
+       uint32_t ret;
+
+       PMD_INIT_FUNC_TRACE();
+
+       cmdInfo->setRSSFields = 0;
+       port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+       rss_hf = port_rss_conf->rss_hf &
+               (VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
+
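+	/* translate rss_hf bits into the device's setRSSFields encoding */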
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+               cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+               cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
+       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+               cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
+       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+               cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
+
+       VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+                              VMXNET3_CMD_SET_RSS_FIELDS);
+       ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+       if (ret != VMXNET3_SUCCESS) {
+               PMD_DRV_LOG(ERR, "Set RSS fields (v4) failed: %d", ret);
+       }
+
+       return ret;
+}
+
 /*
  * Configure RSS feature
  */