ethdev: remove jumbo offload flag
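
This diff drops DEV_RX_OFFLOAD_JUMBO_FRAME from the txgbe Rx offload capabilities and sizes the Rx frame from dev->data->mtu plus TXGBE_ETH_OVERHEAD instead of rxmode.max_rx_pkt_len. A minimal application-side sketch of what that implies, assuming the MTU-based rte_eth_rxmode of this release; the helper name, port_id and the chosen MTU value are illustrative only, not part of this patch:

	#include <rte_ethdev.h>

	/* Illustrative helper: request a jumbo MTU on a stopped port.
	 * Old API: set DEV_RX_OFFLOAD_JUMBO_FRAME and rxmode.max_rx_pkt_len.
	 * New API: request rxmode.mtu; the PMD adds its own L2 overhead
	 * (TXGBE_ETH_OVERHEAD in this driver) when programming TXGBE_FRMSZ. */
	static int
	configure_jumbo(uint16_t port_id, uint16_t mtu)
	{
		struct rte_eth_conf conf = { 0 };

		conf.rxmode.mtu = mtu;	/* e.g. 9000; replaces max_rx_pkt_len */
		return rte_eth_dev_configure(port_id, 1, 1, &conf);
	}

With this, jumbo frames no longer need a dedicated offload flag; requesting a larger MTU is sufficient.
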
diff --git a/drivers/net/txgbe/txgbe_rxtx.c b/drivers/net/txgbe/txgbe_rxtx.c
index 1a26128..7e18dcc 100644
--- a/drivers/net/txgbe/txgbe_rxtx.c
+++ b/drivers/net/txgbe/txgbe_rxtx.c
@@ -562,10 +562,10 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
        /* Tunnel */
        switch (oflags & PKT_TX_TUNNEL_MASK) {
        case PKT_TX_TUNNEL_VXLAN:
+       case PKT_TX_TUNNEL_VXLAN_GPE:
                ptype |= RTE_PTYPE_L2_ETHER |
                         RTE_PTYPE_L3_IPV4 |
-                        RTE_PTYPE_TUNNEL_VXLAN;
-               ptype |= RTE_PTYPE_INNER_L2_ETHER;
+                        RTE_PTYPE_TUNNEL_GRENAT;
                break;
        case PKT_TX_TUNNEL_GRE:
                ptype |= RTE_PTYPE_L2_ETHER |
@@ -579,11 +579,6 @@ tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
                         RTE_PTYPE_TUNNEL_GENEVE;
                ptype |= RTE_PTYPE_INNER_L2_ETHER;
                break;
-       case PKT_TX_TUNNEL_VXLAN_GPE:
-               ptype |= RTE_PTYPE_L2_ETHER |
-                        RTE_PTYPE_L3_IPV4 |
-                        RTE_PTYPE_TUNNEL_VXLAN_GPE;
-               break;
        case PKT_TX_TUNNEL_IPIP:
        case PKT_TX_TUNNEL_IP:
                ptype |= RTE_PTYPE_L2_ETHER |
@@ -696,6 +691,30 @@ txgbe_get_tun_len(struct rte_mbuf *mbuf)
        return tun_len;
 }
 
+static inline uint8_t
+txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt)
+{
+       uint64_t l2_none, l2_mac, l2_mac_vlan;
+       uint8_t ptid = 0;
+
+       if ((tx_pkt->ol_flags & (PKT_TX_TUNNEL_VXLAN |
+                               PKT_TX_TUNNEL_VXLAN_GPE)) == 0)
+               return ptid;
+
+       l2_none = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr);
+       l2_mac = l2_none + sizeof(struct rte_ether_hdr);
+       l2_mac_vlan = l2_mac + sizeof(struct rte_vlan_hdr);
+
+       if (tx_pkt->l2_len == l2_none)
+               ptid = TXGBE_PTID_TUN_EIG;
+       else if (tx_pkt->l2_len == l2_mac)
+               ptid = TXGBE_PTID_TUN_EIGM;
+       else if (tx_pkt->l2_len == l2_mac_vlan)
+               ptid = TXGBE_PTID_TUN_EIGMV;
+
+       return ptid;
+}
+
 uint16_t
 txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts)
@@ -759,6 +778,8 @@ txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                if (tx_ol_req) {
                        tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
                                        tx_pkt->packet_type);
+                       if (tx_offload.ptid & TXGBE_PTID_PKT_TUN)
+                               tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt);
                        tx_offload.l2_len = tx_pkt->l2_len;
                        tx_offload.l3_len = tx_pkt->l3_len;
                        tx_offload.l4_len = tx_pkt->l4_len;
@@ -1953,7 +1974,6 @@ txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
                   DEV_RX_OFFLOAD_UDP_CKSUM   |
                   DEV_RX_OFFLOAD_TCP_CKSUM   |
                   DEV_RX_OFFLOAD_KEEP_CRC    |
-                  DEV_RX_OFFLOAD_JUMBO_FRAME |
                   DEV_RX_OFFLOAD_VLAN_FILTER |
                   DEV_RX_OFFLOAD_RSS_HASH |
                   DEV_RX_OFFLOAD_SCATTER;
@@ -2109,9 +2129,9 @@ txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
 }
 
 void __rte_cold
-txgbe_dev_tx_queue_release(void *txq)
+txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       txgbe_tx_queue_release(txq);
+       txgbe_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 /* (Re)set dynamic txgbe_tx_queue fields to defaults */
@@ -2437,9 +2457,9 @@ txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
 }
 
 void __rte_cold
-txgbe_dev_rx_queue_release(void *rxq)
+txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       txgbe_rx_queue_release(rxq);
+       txgbe_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
 /*
@@ -2688,14 +2708,14 @@ txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 uint32_t
-txgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+txgbe_dev_rx_queue_count(void *rx_queue)
 {
 #define TXGBE_RXQ_SCAN_INTERVAL 4
        volatile struct txgbe_rx_desc *rxdp;
        struct txgbe_rx_queue *rxq;
        uint32_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &rxq->rx_ring[rxq->rx_tail];
 
        while ((desc < rxq->nb_rx_desc) &&
@@ -2795,13 +2815,13 @@ txgbe_dev_free_queues(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
+               txgbe_dev_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
+               txgbe_dev_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
@@ -4305,13 +4325,8 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
        /*
         * Configure jumbo frame support, if any.
         */
-       if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
-                       TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
-       } else {
-               wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
-                       TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
-       }
+       wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+               TXGBE_FRMSZ_MAX(dev->data->mtu + TXGBE_ETH_OVERHEAD));
 
        /*
         * If loopback mode is configured, set LPBK bit.
@@ -4373,8 +4388,8 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
                wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
 
                /* It adds dual VLAN length for supporting dual VLAN */
-               if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                                           2 * TXGBE_VLAN_TAG_SIZE > buf_size)
+               if (dev->data->mtu + TXGBE_ETH_OVERHEAD +
+                               2 * TXGBE_VLAN_TAG_SIZE > buf_size)
                        dev->data->scattered_rx = 1;
                if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
@@ -4826,9 +4841,9 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
         * VF packets received can work in all cases.
         */
        if (txgbevf_rlpml_set_vf(hw,
-           (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
+           (uint16_t)dev->data->mtu + TXGBE_ETH_OVERHEAD)) {
                PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
-                            dev->data->dev_conf.rxmode.max_rx_pkt_len);
+                            dev->data->mtu + TXGBE_ETH_OVERHEAD);
                return -EINVAL;
        }
 
@@ -4890,7 +4905,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 
                if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
                    /* It adds dual VLAN length for supporting dual VLAN */
-                   (rxmode->max_rx_pkt_len +
+                   (dev->data->mtu + TXGBE_ETH_OVERHEAD +
                                2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
                        if (!dev->data->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");