diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 73e270f..a875ffe 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -32,7 +32,7 @@
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_prefetch.h>
 #include <rte_ip.h>
 #include <rte_udp.h>
 #include "vmxnet3_logs.h"
 #include "vmxnet3_ethdev.h"
 
-#define        VMXNET3_TX_OFFLOAD_MASK ( \
-               PKT_TX_VLAN_PKT | \
-               PKT_TX_IPV6 |     \
-               PKT_TX_IPV4 |     \
-               PKT_TX_L4_MASK |  \
-               PKT_TX_TCP_SEG)
+#define        VMXNET3_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_VLAN | \
+               RTE_MBUF_F_TX_IPV6 |     \
+               RTE_MBUF_F_TX_IPV4 |     \
+               RTE_MBUF_F_TX_L4_MASK |  \
+               RTE_MBUF_F_TX_TCP_SEG)
 
 #define        VMXNET3_TX_OFFLOAD_NOTSUP_MASK  \
-       (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
+       (RTE_MBUF_F_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
 
 static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
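
DPDK 21.11 moved the public mbuf offload flags from the bare PKT_* names into the RTE_MBUF_F_* namespace (note PKT_TX_VLAN_PKT becoming RTE_MBUF_F_TX_VLAN), and the driver-only ethdev header was renamed from rte_ethdev_driver.h to ethdev_driver.h. A minimal, hypothetical compatibility shim for out-of-tree code that has to build on both sides of the rename might cover just the flags this file uses:

#include <rte_mbuf.h>

/* Hypothetical shim: map the 21.11 RTE_MBUF_F_TX_* names onto the old
 * PKT_TX_* ones when building against an older DPDK. */
#ifndef RTE_MBUF_F_TX_TCP_SEG
#define RTE_MBUF_F_TX_VLAN         PKT_TX_VLAN_PKT
#define RTE_MBUF_F_TX_IPV4         PKT_TX_IPV4
#define RTE_MBUF_F_TX_IPV6         PKT_TX_IPV6
#define RTE_MBUF_F_TX_L4_MASK      PKT_TX_L4_MASK
#define RTE_MBUF_F_TX_TCP_SEG      PKT_TX_TCP_SEG
#define RTE_MBUF_F_TX_OFFLOAD_MASK PKT_TX_OFFLOAD_MASK
#endif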
 
@@ -165,9 +164,9 @@ vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
 }
 
 void
-vmxnet3_dev_tx_queue_release(void *txq)
+vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       vmxnet3_tx_queue_t *tq = txq;
+       vmxnet3_tx_queue_t *tq = dev->data->tx_queues[qid];
 
        if (tq != NULL) {
                /* Release mbufs */
@@ -182,10 +181,10 @@ vmxnet3_dev_tx_queue_release(void *txq)
 }
 
 void
-vmxnet3_dev_rx_queue_release(void *rxq)
+vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
        int i;
-       vmxnet3_rx_queue_t *rq = rxq;
+       vmxnet3_rx_queue_t *rq = dev->data->rx_queues[qid];
 
        if (rq != NULL) {
                /* Release mbufs */
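
Both release callbacks follow the 21.11 ethdev change from void (*)(void *queue) to a (device, queue id) pair, so the PMD now fetches the queue from dev->data itself. A sketch of the new shape with hypothetical names, assuming the slot should also be cleared so a second release is harmless:

#include <ethdev_driver.h>
#include <rte_malloc.h>

struct my_rxq { void *ring; /* hypothetical per-queue state */ };

static void
my_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct my_rxq *rxq = dev->data->rx_queues[qid];

	if (rxq == NULL)
		return;
	rte_free(rxq);                    /* free per-queue resources */
	dev->data->rx_queues[qid] = NULL; /* idempotent on repeat calls */
}
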
@@ -341,6 +340,9 @@ vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
        }
 
        PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
+
+       /* To avoid compiler warnings when not in DEBUG mode. */
+       RTE_SET_USED(completed);
 }
 
 uint16_t
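
The completed counter is only consumed by PMD_TX_LOG, which compiles to nothing in non-debug builds, so RTE_SET_USED (a (void) cast from rte_common.h) keeps -Werror builds clean. The idiom in isolation:

#include <rte_common.h>

static void
drain_completions(int n)
{
	int completed = n;

	/* The debug-only log consumer may be compiled out; mark the
	 * variable as deliberately used to silence the warning. */
	RTE_SET_USED(completed);
}
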
@@ -359,7 +361,7 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                /* Non-TSO packet cannot occupy more than
                 * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
                 */
-               if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
+               if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0 &&
                                m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
                        rte_errno = EINVAL;
                        return i;
@@ -367,8 +369,8 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 
                /* check that only supported TX offloads are requested. */
                if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
-                               (ol_flags & PKT_TX_L4_MASK) ==
-                               PKT_TX_SCTP_CKSUM) {
+                               (ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+                               RTE_MBUF_F_TX_SCTP_CKSUM) {
                        rte_errno = ENOTSUP;
                        return i;
                }
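
vmxnet3_prep_pkts is the port's tx_pkt_prepare hook: it stops at the first mbuf that needs more than VMXNET3_MAX_TXD_PER_PKT descriptors or asks for an unsupported offload (SCTP checksum included), leaving the reason in rte_errno. A hypothetical caller-side check, assuming port_id and queue_id name a started port:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

static uint16_t
send_checked(uint16_t port_id, uint16_t queue_id,
	     struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep < nb_pkts)
		printf("mbuf %u rejected: %s\n",
		       nb_prep, rte_strerror(rte_errno));
	/* Transmit only the prefix that passed validation. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}
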
@@ -416,7 +418,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                struct rte_mbuf *m_seg = txm;
                int copy_size = 0;
-               bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
+               bool tso = (txm->ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0;
                /* # of descriptors needed for a packet. */
                unsigned count = txm->nb_segs;
 
@@ -520,7 +522,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
                /* Add VLAN tag if present */
                gdesc = txq->cmd_ring.base + first2fill;
-               if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+               if (txm->ol_flags & RTE_MBUF_F_TX_VLAN) {
                        gdesc->txd.ti = 1;
                        gdesc->txd.tci = txm->vlan_tci;
                }
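
On the application side, hardware VLAN insertion is requested per mbuf with exactly the flag and tag this branch consumes. A minimal sketch:

#include <rte_mbuf.h>

/* Ask the NIC to insert an 802.1Q tag on transmit; the PMD copies
 * vlan_tci into the descriptor (gdesc->txd.tci above). */
static void
request_vlan_insertion(struct rte_mbuf *m, uint16_t vlan_id)
{
	m->ol_flags |= RTE_MBUF_F_TX_VLAN;
	m->vlan_tci = vlan_id;
}
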
@@ -535,23 +537,23 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        gdesc->txd.msscof = mss;
 
                        deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
-               } else if (txm->ol_flags & PKT_TX_L4_MASK) {
+               } else if (txm->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
                        gdesc->txd.om = VMXNET3_OM_CSUM;
                        gdesc->txd.hlen = txm->l2_len + txm->l3_len;
 
-                       switch (txm->ol_flags & PKT_TX_L4_MASK) {
-                       case PKT_TX_TCP_CKSUM:
+                       switch (txm->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+                       case RTE_MBUF_F_TX_TCP_CKSUM:
                                gdesc->txd.msscof = gdesc->txd.hlen +
                                        offsetof(struct rte_tcp_hdr, cksum);
                                break;
-                       case PKT_TX_UDP_CKSUM:
+                       case RTE_MBUF_F_TX_UDP_CKSUM:
                                gdesc->txd.msscof = gdesc->txd.hlen +
                                        offsetof(struct rte_udp_hdr,
                                                dgram_cksum);
                                break;
                        default:
                                PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
-                                          txm->ol_flags & PKT_TX_L4_MASK);
+                                          txm->ol_flags & RTE_MBUF_F_TX_L4_MASK);
                                abort();
                        }
                        deferred++;
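
In checksum-offload mode the device expects msscof to point at the checksum field itself: the L2 plus L3 header length, plus the checksum's offset inside the L4 header. The arithmetic as a standalone sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <rte_tcp.h>
#include <rte_udp.h>

static uint32_t
csum_field_offset(uint32_t l2_len, uint32_t l3_len, bool is_tcp)
{
	uint32_t hlen = l2_len + l3_len; /* start of the L4 header */

	return hlen + (is_tcp ? offsetof(struct rte_tcp_hdr, cksum)
			      : offsetof(struct rte_udp_hdr, dgram_cksum));
}
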
@@ -674,6 +676,7 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
        struct rte_ipv6_hdr *ipv6_hdr;
        struct rte_tcp_hdr *tcp_hdr;
        char *ptr;
+       uint8_t segs;
 
        RTE_ASSERT(rcd->tcp);
 
@@ -687,8 +690,7 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
                                        - sizeof(struct rte_tcp_hdr);
 
                ipv4_hdr = (struct rte_ipv4_hdr *)(ptr + hlen);
-               hlen += (ipv4_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
-                               RTE_IPV4_IHL_MULTIPLIER;
+               hlen += rte_ipv4_hdr_len(ipv4_hdr);
        } else if (rcd->v6) {
                if (unlikely(slen < hlen + sizeof(struct rte_ipv6_hdr)))
                        return hw->mtu - sizeof(struct rte_ipv6_hdr) -
@@ -711,9 +713,9 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
        tcp_hdr = (struct rte_tcp_hdr *)(ptr + hlen);
        hlen += (tcp_hdr->data_off & 0xf0) >> 2;
 
-       if (rxm->udata64 > 1)
-               return (rte_pktmbuf_pkt_len(rxm) - hlen +
-                               rxm->udata64 - 1) / rxm->udata64;
+       segs = *vmxnet3_segs_dynfield(rxm);
+       if (segs > 1)
+               return (rte_pktmbuf_pkt_len(rxm) - hlen + segs - 1) / segs;
        else
                return hw->mtu - hlen + sizeof(struct rte_ether_hdr);
 }
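
The MSS guess is a ceiling division: an LRO-coalesced packet of pkt_len bytes known to carry segs original segments implies each segment held about (pkt_len - hlen) / segs payload bytes, rounded up. In isolation:

#include <stdint.h>

/* ceil((pkt_len - hlen) / segs); e.g. 9000 payload bytes seen across
 * 7 coalesced segments yields an MSS estimate of 1286. */
static uint16_t
guess_mss(uint32_t pkt_len, uint32_t hlen, uint8_t segs)
{
	if (segs <= 1)
		return 0; /* caller falls back to an MTU-based estimate */
	return (pkt_len - hlen + segs - 1) / segs;
}
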
@@ -738,36 +740,53 @@ vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
                                        (const Vmxnet3_RxCompDescExt *)rcd;
 
                        rxm->tso_segsz = rcde->mss;
-                       rxm->udata64 = rcde->segCnt;
-                       ol_flags |= PKT_RX_LRO;
+                       *vmxnet3_segs_dynfield(rxm) = rcde->segCnt;
+                       ol_flags |= RTE_MBUF_F_RX_LRO;
                }
        } else { /* Offloads set in eop */
                /* Check for RSS */
                if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
-                       ol_flags |= PKT_RX_RSS_HASH;
+                       ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
                        rxm->hash.rss = rcd->rssHash;
                }
 
                /* Check for hardware stripped VLAN tag */
                if (rcd->ts) {
-                       ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+                       ol_flags |= (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
                        rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
                }
 
                /* Check packet type, checksum errors, etc. */
                if (rcd->cnc) {
-                       ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+                       ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
+
+                       if (rcd->v4) {
+                               packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+                               if (rcd->tcp)
+                                       packet_type |= RTE_PTYPE_L4_TCP;
+                               else if (rcd->udp)
+                                       packet_type |= RTE_PTYPE_L4_UDP;
+                       } else if (rcd->v6) {
+                               packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+                               if (rcd->tcp)
+                                       packet_type |= RTE_PTYPE_L4_TCP;
+                               else if (rcd->udp)
+                                       packet_type |= RTE_PTYPE_L4_UDP;
+                       } else {
+                               packet_type |= RTE_PTYPE_UNKNOWN;
+                       }
+
                } else {
                        if (rcd->v4) {
                                packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
 
                                if (rcd->ipc)
-                                       ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+                                       ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                                else
-                                       ol_flags |= PKT_RX_IP_CKSUM_BAD;
+                                       ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
 
                                if (rcd->tuc) {
-                                       ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+                                       ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                                        if (rcd->tcp)
                                                packet_type |= RTE_PTYPE_L4_TCP;
                                        else
@@ -775,17 +794,17 @@ vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
                                } else {
                                        if (rcd->tcp) {
                                                packet_type |= RTE_PTYPE_L4_TCP;
-                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                               ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                                        } else if (rcd->udp) {
                                                packet_type |= RTE_PTYPE_L4_UDP;
-                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                               ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                                        }
                                }
                        } else if (rcd->v6) {
                                packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
 
                                if (rcd->tuc) {
-                                       ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+                                       ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                                        if (rcd->tcp)
                                                packet_type |= RTE_PTYPE_L4_TCP;
                                        else
@@ -793,10 +812,10 @@ vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
                                } else {
                                        if (rcd->tcp) {
                                                packet_type |= RTE_PTYPE_L4_TCP;
-                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                               ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                                        } else if (rcd->udp) {
                                                packet_type |= RTE_PTYPE_L4_UDP;
-                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                               ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                                        }
                                }
                        } else {
@@ -804,7 +823,7 @@ vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
                        }
 
                        /* Old variants of vmxnet3 do not provide MSS */
-                       if ((ol_flags & PKT_RX_LRO) && rxm->tso_segsz == 0)
+                       if ((ol_flags & RTE_MBUF_F_RX_LRO) && rxm->tso_segsz == 0)
                                rxm->tso_segsz = vmxnet3_guess_mss(hw,
                                                rcd, rxm);
                }
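
The switch from rxm->udata64 (removed from struct rte_mbuf in 21.11) to vmxnet3_segs_dynfield() rests on the mbuf dynamic-field API: the driver registers a named field once at init and reads it back through the returned byte offset. A hedged sketch with illustrative names, not the driver's actual registration code:

#include <rte_mbuf_dyn.h>

static int segs_offset = -1; /* filled in at registration time */

static inline uint8_t *
segs_dynfield(struct rte_mbuf *m)
{
	return RTE_MBUF_DYNFIELD(m, segs_offset, uint8_t *);
}

static int
register_segs_dynfield(void)
{
	static const struct rte_mbuf_dynfield desc = {
		.name = "example_segs_dynfield", /* hypothetical name */
		.size = sizeof(uint8_t),
		.align = __alignof__(uint8_t),
	};

	segs_offset = rte_mbuf_dynfield_register(&desc);
	return segs_offset < 0 ? -1 : 0;
}
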
@@ -1017,6 +1036,36 @@ rcd_done:
        return nb_rx;
 }
 
+uint32_t
+vmxnet3_dev_rx_queue_count(void *rx_queue)
+{
+       const vmxnet3_rx_queue_t *rxq;
+       const Vmxnet3_RxCompDesc *rcd;
+       uint32_t idx, nb_rxd = 0;
+       uint8_t gen;
+
+       rxq = rx_queue;
+       if (unlikely(rxq->stopped)) {
+               PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
+               return 0;
+       }
+
+       gen = rxq->comp_ring.gen;
+       idx = rxq->comp_ring.next2proc;
+       rcd = &rxq->comp_ring.base[idx].rcd;
+       while (rcd->gen == gen) {
+               if (rcd->eop)
+                       ++nb_rxd;
+               if (++idx == rxq->comp_ring.size) {
+                       idx = 0;
+                       gen ^= 1;
+               }
+               rcd = &rxq->comp_ring.base[idx].rcd;
+       }
+
+       return nb_rxd;
+}
+
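
The new callback walks the completion ring, counting descriptors whose generation bit still matches until it reaches one the device has not written; checking eop makes multi-segment packets count once. Applications reach it through the ethdev API, roughly as in this hypothetical monitoring snippet:

#include <stdio.h>
#include <rte_ethdev.h>

static void
log_rx_backlog(uint16_t port_id, uint16_t queue_id)
{
	/* Negative results are -errno (e.g. -ENOTSUP on PMDs without
	 * the callback); otherwise the count of completed packets not
	 * yet retrieved by rte_eth_rx_burst(). */
	int n = rte_eth_rx_queue_count(port_id, queue_id);

	if (n >= 0)
		printf("port %u rxq %u: %d packets ready\n",
		       port_id, queue_id, n);
}
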
 int
 vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
@@ -1326,13 +1375,13 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
        rss_hf = port_rss_conf->rss_hf &
                (VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
                cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
                cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
                cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
                cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
 
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
@@ -1368,7 +1417,7 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
        /* loading hashKeySize */
        dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
        /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
-       dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
+       dev_rss_conf->indTableSize = (uint16_t)((MAX_RX_QUEUES(hw)) * 4);
 
        if (port_rss_conf->rss_key == NULL) {
                /* Default hash key */
@@ -1389,13 +1438,13 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
        /* loading hashType */
        dev_rss_conf->hashType = 0;
        rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
-       if (rss_hf & ETH_RSS_IPV4)
+       if (rss_hf & RTE_ETH_RSS_IPV4)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
-       if (rss_hf & ETH_RSS_IPV6)
+       if (rss_hf & RTE_ETH_RSS_IPV6)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
 
        return VMXNET3_SUCCESS;
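
The ETH_RSS_* to RTE_ETH_RSS_* switch is the same 21.11 namespace cleanup as the mbuf flags. On the application side, the hash types that feed hashType and setRSSFields above are requested through the device configuration, roughly as in this hedged sketch:

#include <stddef.h>
#include <rte_ethdev.h>

static void
request_rss(struct rte_eth_conf *conf)
{
	conf->rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
	conf->rx_adv_conf.rss_conf.rss_key = NULL; /* keep the PMD default */
	conf->rx_adv_conf.rss_conf.rss_hf =
		RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP |
		RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP;
}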