diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index e10f9ee..deba64b 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -32,7 +32,7 @@
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_prefetch.h>
 #include <rte_ip.h>
 #include <rte_udp.h>
 #include "vmxnet3_logs.h"
 #include "vmxnet3_ethdev.h"
 
-#define        VMXNET3_TX_OFFLOAD_MASK ( \
-               PKT_TX_VLAN_PKT | \
-               PKT_TX_IPV6 |     \
-               PKT_TX_IPV4 |     \
-               PKT_TX_L4_MASK |  \
-               PKT_TX_TCP_SEG)
+#define        VMXNET3_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_VLAN | \
+               RTE_MBUF_F_TX_IPV6 |     \
+               RTE_MBUF_F_TX_IPV4 |     \
+               RTE_MBUF_F_TX_L4_MASK |  \
+               RTE_MBUF_F_TX_TCP_SEG)
 
 #define        VMXNET3_TX_OFFLOAD_NOTSUP_MASK  \
-       (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
+       (RTE_MBUF_F_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
 
 static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
 
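Note: the PKT_TX_* names map one-to-one onto the new RTE_MBUF_F_TX_* names, so the supported/unsupported offload masks keep the same meaning. As a rough application-side illustration (not part of this patch; everything except the flag and struct names is hypothetical), requesting offloads that this mask accepts now looks like:

    #include <rte_ether.h>
    #include <rte_ip.h>
    #include <rte_mbuf.h>

    /* Hypothetical helper: ask for VLAN insertion plus TCP checksum offload
     * on an already-built IPv4/TCP frame, using the renamed mbuf flags. */
    static void
    request_tx_offloads(struct rte_mbuf *m, uint16_t vlan_tci)
    {
        m->l2_len = sizeof(struct rte_ether_hdr);
        m->l3_len = sizeof(struct rte_ipv4_hdr);
        m->vlan_tci = vlan_tci;
        m->ol_flags |= RTE_MBUF_F_TX_VLAN |
                       RTE_MBUF_F_TX_IPV4 |
                       RTE_MBUF_F_TX_TCP_CKSUM;
    }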
@@ -165,9 +164,9 @@ vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
 }
 
 void
-vmxnet3_dev_tx_queue_release(void *txq)
+vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       vmxnet3_tx_queue_t *tq = txq;
+       vmxnet3_tx_queue_t *tq = dev->data->tx_queues[qid];
 
        if (tq != NULL) {
                /* Release mbufs */
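Note: the release callback now receives the port and queue index instead of an opaque queue pointer and looks the queue up in dev->data itself, following the reworked eth_dev_ops prototypes. A minimal sketch of how such a callback is wired into a PMD's ops table (driver names are made up, not the literal vmxnet3 code):

    #include <ethdev_driver.h>
    #include <rte_malloc.h>

    /* Hypothetical PMD: free per-queue state via the new callback signature. */
    static void
    dummy_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
    {
        void *txq = dev->data->tx_queues[qid];

        if (txq == NULL)
            return;
        rte_free(txq);
        dev->data->tx_queues[qid] = NULL;
    }

    static const struct eth_dev_ops dummy_dev_ops = {
        .tx_queue_release = dummy_tx_queue_release,
        /* ... */
    };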
@@ -182,10 +181,10 @@ vmxnet3_dev_tx_queue_release(void *txq)
 }
 
 void
-vmxnet3_dev_rx_queue_release(void *rxq)
+vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
        int i;
-       vmxnet3_rx_queue_t *rq = rxq;
+       vmxnet3_rx_queue_t *rq = dev->data->rx_queues[qid];
 
        if (rq != NULL) {
                /* Release mbufs */
@@ -359,7 +358,7 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                /* Non-TSO packet cannot occupy more than
                 * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
                 */
-               if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
+               if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0 &&
                                m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
                        rte_errno = EINVAL;
                        return i;
@@ -367,8 +366,8 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 
                /* check that only supported TX offloads are requested. */
                if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
-                               (ol_flags & PKT_TX_L4_MASK) ==
-                               PKT_TX_SCTP_CKSUM) {
+                               (ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+                               RTE_MBUF_F_TX_SCTP_CKSUM) {
                        rte_errno = ENOTSUP;
                        return i;
                }
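Note: applications can surface these rejections (rte_errno set to EINVAL or ENOTSUP) before the actual transmit by running the burst through rte_eth_tx_prepare(); a hedged usage sketch, with the wrapper name and logging made up:

    #include <stdio.h>
    #include <rte_errno.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Hypothetical wrapper: let the PMD's tx_pkt_prepare callback reject
     * unsupported offload combinations before rte_eth_tx_burst(). */
    static uint16_t
    send_burst_checked(uint16_t port_id, uint16_t queue_id,
                       struct rte_mbuf **pkts, uint16_t nb)
    {
        uint16_t nb_ok = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);

        if (nb_ok != nb)
            printf("pkt %u rejected: %s\n", nb_ok, rte_strerror(rte_errno));

        return rte_eth_tx_burst(port_id, queue_id, pkts, nb_ok);
    }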
@@ -416,7 +415,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                struct rte_mbuf *m_seg = txm;
                int copy_size = 0;
-               bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
+               bool tso = (txm->ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0;
                /* # of descriptors needed for a packet. */
                unsigned count = txm->nb_segs;
 
@@ -520,7 +519,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
                /* Add VLAN tag if present */
                gdesc = txq->cmd_ring.base + first2fill;
-               if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+               if (txm->ol_flags & RTE_MBUF_F_TX_VLAN) {
                        gdesc->txd.ti = 1;
                        gdesc->txd.tci = txm->vlan_tci;
                }
@@ -535,23 +534,23 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        gdesc->txd.msscof = mss;
 
                        deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
-               } else if (txm->ol_flags & PKT_TX_L4_MASK) {
+               } else if (txm->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
                        gdesc->txd.om = VMXNET3_OM_CSUM;
                        gdesc->txd.hlen = txm->l2_len + txm->l3_len;
 
-                       switch (txm->ol_flags & PKT_TX_L4_MASK) {
-                       case PKT_TX_TCP_CKSUM:
+                       switch (txm->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+                       case RTE_MBUF_F_TX_TCP_CKSUM:
                                gdesc->txd.msscof = gdesc->txd.hlen +
                                        offsetof(struct rte_tcp_hdr, cksum);
                                break;
-                       case PKT_TX_UDP_CKSUM:
+                       case RTE_MBUF_F_TX_UDP_CKSUM:
                                gdesc->txd.msscof = gdesc->txd.hlen +
                                        offsetof(struct rte_udp_hdr,
                                                dgram_cksum);
                                break;
                        default:
                                PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
-                                          txm->ol_flags & PKT_TX_L4_MASK);
+                                          txm->ol_flags & RTE_MBUF_F_TX_L4_MASK);
                                abort();
                        }
                        deferred++;
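Note: the TSO branch's (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss is a ceiling division over the payload; for example, with 8960 payload bytes after the headers and an MSS of 1460 it adds 7 to the deferred-descriptor count.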
@@ -674,6 +673,7 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
        struct rte_ipv6_hdr *ipv6_hdr;
        struct rte_tcp_hdr *tcp_hdr;
        char *ptr;
+       uint8_t segs;
 
        RTE_ASSERT(rcd->tcp);
 
@@ -710,9 +710,9 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
        tcp_hdr = (struct rte_tcp_hdr *)(ptr + hlen);
        hlen += (tcp_hdr->data_off & 0xf0) >> 2;
 
-       if (rxm->udata64 > 1)
-               return (rte_pktmbuf_pkt_len(rxm) - hlen +
-                               rxm->udata64 - 1) / rxm->udata64;
+       segs = *vmxnet3_segs_dynfield(rxm);
+       if (segs > 1)
+               return (rte_pktmbuf_pkt_len(rxm) - hlen + segs - 1) / segs;
        else
                return hw->mtu - hlen + sizeof(struct rte_ether_hdr);
 }
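Note: vmxnet3_segs_dynfield() replaces the former rxm->udata64 storage with a dynamic mbuf field holding the hardware-reported segment count. A sketch of how such a field is typically registered and accessed (the field name and registration point here are assumptions, not necessarily the literal vmxnet3 ethdev code):

    #include <stdint.h>
    #include <rte_errno.h>
    #include <rte_mbuf_dyn.h>

    static int vmxnet3_segs_dynfield_offset = -1;

    /* Accessor used by the RX path (illustrative definition). */
    #define vmxnet3_segs_dynfield(m) \
        RTE_MBUF_DYNFIELD((m), vmxnet3_segs_dynfield_offset, uint8_t *)

    /* Register the field once, e.g. at device probe time (name assumed). */
    static int
    register_segs_dynfield(void)
    {
        static const struct rte_mbuf_dynfield desc = {
            .name = "rte_net_vmxnet3_dynfield_segs",
            .size = sizeof(uint8_t),
            .align = __alignof__(uint8_t),
        };

        vmxnet3_segs_dynfield_offset = rte_mbuf_dynfield_register(&desc);
        return vmxnet3_segs_dynfield_offset < 0 ? -rte_errno : 0;
    }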
@@ -737,36 +737,36 @@ vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
                                        (const Vmxnet3_RxCompDescExt *)rcd;
 
                        rxm->tso_segsz = rcde->mss;
-                       rxm->udata64 = rcde->segCnt;
-                       ol_flags |= PKT_RX_LRO;
+                       *vmxnet3_segs_dynfield(rxm) = rcde->segCnt;
+                       ol_flags |= RTE_MBUF_F_RX_LRO;
                }
        } else { /* Offloads set in eop */
                /* Check for RSS */
                if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
-                       ol_flags |= PKT_RX_RSS_HASH;
+                       ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
                        rxm->hash.rss = rcd->rssHash;
                }
 
                /* Check for hardware stripped VLAN tag */
                if (rcd->ts) {
-                       ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+                       ol_flags |= (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
                        rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
                }
 
                /* Check packet type, checksum errors, etc. */
                if (rcd->cnc) {
-                       ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+                       ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
                } else {
                        if (rcd->v4) {
                                packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
 
                                if (rcd->ipc)
-                                       ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+                                       ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                                else
-                                       ol_flags |= PKT_RX_IP_CKSUM_BAD;
+                                       ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
 
                                if (rcd->tuc) {
-                                       ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+                                       ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                                        if (rcd->tcp)
                                                packet_type |= RTE_PTYPE_L4_TCP;
                                        else
@@ -774,17 +774,17 @@ vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
                                } else {
                                        if (rcd->tcp) {
                                                packet_type |= RTE_PTYPE_L4_TCP;
-                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                               ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                                        } else if (rcd->udp) {
                                                packet_type |= RTE_PTYPE_L4_UDP;
-                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                               ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                                        }
                                }
                        } else if (rcd->v6) {
                                packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
 
                                if (rcd->tuc) {
-                                       ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+                                       ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                                        if (rcd->tcp)
                                                packet_type |= RTE_PTYPE_L4_TCP;
                                        else
@@ -792,10 +792,10 @@ vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
                                } else {
                                        if (rcd->tcp) {
                                                packet_type |= RTE_PTYPE_L4_TCP;
-                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                               ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                                        } else if (rcd->udp) {
                                                packet_type |= RTE_PTYPE_L4_UDP;
-                                               ol_flags |= PKT_RX_L4_CKSUM_BAD;
+                                               ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                                        }
                                }
                        } else {
@@ -803,7 +803,7 @@ vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
                        }
 
                        /* Old variants of vmxnet3 do not provide MSS */
-                       if ((ol_flags & PKT_RX_LRO) && rxm->tso_segsz == 0)
+                       if ((ol_flags & RTE_MBUF_F_RX_LRO) && rxm->tso_segsz == 0)
                                rxm->tso_segsz = vmxnet3_guess_mss(hw,
                                                rcd, rxm);
                }
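Note: on the receive side the renamed RTE_MBUF_F_RX_* flags are consumed by applications exactly as the old PKT_RX_* ones were; a hedged sketch of checking them after rte_eth_rx_burst() (handler name and drop policy are illustrative):

    #include <rte_mbuf.h>

    /* Hypothetical RX handler: inspect the offload flags set by the PMD
     * and drop packets whose L4 checksum the device reported as bad. */
    static int
    handle_rx_mbuf(struct rte_mbuf *m)
    {
        uint32_t rss_hash = 0;
        uint16_t vlan_tci = 0;

        if (m->ol_flags & RTE_MBUF_F_RX_RSS_HASH)
            rss_hash = m->hash.rss;     /* hash computed by the device */
        if (m->ol_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
            vlan_tci = m->vlan_tci;     /* tag stripped by the device */

        if ((m->ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
                RTE_MBUF_F_RX_L4_CKSUM_BAD) {
            rte_pktmbuf_free(m);
            return -1;
        }

        /* rss_hash/vlan_tci would feed flow steering in a real application. */
        (void)rss_hash;
        (void)vlan_tci;
        return 0;
    }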
@@ -1325,13 +1325,13 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
        rss_hf = port_rss_conf->rss_hf &
                (VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
 
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
                cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
                cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
                cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
                cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
 
        VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
@@ -1388,13 +1388,13 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
        /* loading hashType */
        dev_rss_conf->hashType = 0;
        rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
-       if (rss_hf & ETH_RSS_IPV4)
+       if (rss_hf & RTE_ETH_RSS_IPV4)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
-       if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
-       if (rss_hf & ETH_RSS_IPV6)
+       if (rss_hf & RTE_ETH_RSS_IPV6)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
-       if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+       if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
 
        return VMXNET3_SUCCESS;
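Note: with the ETH_RSS_* to RTE_ETH_RSS_* rename in the two RSS hunks above, an application requesting the hash types they map onto the device would configure something like this sketch (queue counts and the mq_mode handling are generic ethdev usage, not taken from this driver):

    #include <rte_ethdev.h>

    /* Hypothetical port setup requesting TCP/UDP over IPv4/IPv6 RSS. */
    static int
    configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
        struct rte_eth_conf conf = {
            .rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
            .rx_adv_conf.rss_conf = {
                .rss_key = NULL,        /* keep the driver default key */
                .rss_hf = RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                          RTE_ETH_RSS_NONFRAG_IPV6_TCP |
                          RTE_ETH_RSS_NONFRAG_IPV4_UDP |
                          RTE_ETH_RSS_NONFRAG_IPV6_UDP,
            },
        };

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
    }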