ethdev: remove jumbo offload flag
[dpdk.git] / drivers / net / igc / igc_txrx.c
index 5b269b6..56132e8 100644 (file)
@@ -3,8 +3,9 @@
  */
 
 #include <rte_config.h>
+#include <rte_flow.h>
 #include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_net.h>
 
 #include "igc_logs.h"
@@ -715,14 +716,13 @@ igc_rx_queue_release(struct igc_rx_queue *rxq)
        rte_free(rxq);
 }
 
-void eth_igc_rx_queue_release(void *rxq)
+void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       if (rxq)
-               igc_rx_queue_release(rxq);
+       if (dev->data->rx_queues[qid])
+               igc_rx_queue_release(dev->data->rx_queues[qid]);
 }
 
-uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
-               uint16_t rx_queue_id)
+uint32_t eth_igc_rx_queue_count(void *rx_queue)
 {
        /**
         * Check the DD bit of a rx descriptor of each 4 in a group,
@@ -735,7 +735,7 @@ uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
        struct igc_rx_queue *rxq;
        uint16_t desc = 0;
 
-       rxq = dev->data->rx_queues[rx_queue_id];
+       rxq = rx_queue;
        rxdp = &rxq->rx_ring[rxq->rx_tail];
 
        while (desc < rxq->nb_rx_desc - rxq->rx_tail) {
@@ -756,24 +756,6 @@ uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
        return desc;
 }
 
-int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset)
-{
-       volatile union igc_adv_rx_desc *rxdp;
-       struct igc_rx_queue *rxq = rx_queue;
-       uint32_t desc;
-
-       if (unlikely(!rxq || offset >= rxq->nb_rx_desc))
-               return 0;
-
-       desc = rxq->rx_tail + offset;
-       if (desc >= rxq->nb_rx_desc)
-               desc -= rxq->nb_rx_desc;
-
-       rxdp = &rxq->rx_ring[desc];
-       return !!(rxdp->wb.upper.status_error &
-                       rte_cpu_to_le_32(IGC_RXD_STAT_DD));
-}
-
 int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset)
 {
        struct igc_rx_queue *rxq = rx_queue;
@@ -1035,10 +1017,8 @@ igc_clear_rss_filter(struct rte_eth_dev *dev)
 {
        struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
 
-       if (!rss_filter->enable) {
-               PMD_DRV_LOG(WARNING, "RSS filter not enabled!");
+       if (!rss_filter->enable)
                return;
-       }
 
        /* recover default RSS configuration */
        igc_rss_configure(dev);
@@ -1082,7 +1062,7 @@ igc_rx_init(struct rte_eth_dev *dev)
        struct igc_rx_queue *rxq;
        struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
        uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
-       uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       uint32_t max_rx_pktlen;
        uint32_t rctl;
        uint32_t rxcsum;
        uint16_t buf_size;
@@ -1100,17 +1080,17 @@ igc_rx_init(struct rte_eth_dev *dev)
        IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
 
        /* Configure support of jumbo frames, if any. */
-       if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+       if (dev->data->mtu > RTE_ETHER_MTU)
                rctl |= IGC_RCTL_LPE;
-
-               /*
-                * Set maximum packet length by default, and might be updated
-                * together with enabling/disabling dual VLAN.
-                */
-               IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pkt_len);
-       } else {
+       else
                rctl &= ~IGC_RCTL_LPE;
-       }
+
+       max_rx_pktlen = dev->data->mtu + IGC_ETH_OVERHEAD;
+       /*
+        * Set maximum packet length by default, and might be updated
+        * together with enabling/disabling dual VLAN.
+        */
+       IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pktlen);
 
        /* Configure and enable each RX queue. */
        rctl_bsize = 0;
@@ -1169,7 +1149,7 @@ igc_rx_init(struct rte_eth_dev *dev)
                                        IGC_SRRCTL_BSIZEPKT_SHIFT);
 
                        /* It adds dual VLAN length for supporting dual VLAN */
-                       if (max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size)
+                       if (max_rx_pktlen > buf_size)
                                dev->data->scattered_rx = 1;
                } else {
                        /*
@@ -1292,20 +1272,24 @@ igc_rx_init(struct rte_eth_dev *dev)
         * This needs to be done after enable.
         */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               uint32_t dvmolr;
+
                rxq = dev->data->rx_queues[i];
                IGC_WRITE_REG(hw, IGC_RDH(rxq->reg_idx), 0);
-               IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx),
-                               rxq->nb_rx_desc - 1);
+               IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
 
-               /* strip queue vlan offload */
-               if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
-                       uint32_t dvmolr;
-                       dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->queue_id));
+               dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
+               if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+                       dvmolr |= IGC_DVMOLR_STRVLAN;
+               else
+                       dvmolr &= ~IGC_DVMOLR_STRVLAN;
 
-                       /* If vlan been stripped off, the CRC is meaningless. */
-                       dvmolr |= IGC_DVMOLR_STRVLAN | IGC_DVMOLR_STRCRC;
-                       IGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr);
-               }
+               if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+                       dvmolr &= ~IGC_DVMOLR_STRCRC;
+               else
+                       dvmolr |= IGC_DVMOLR_STRCRC;
+
+               IGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr);
        }
 
        return 0;
@@ -1436,7 +1420,7 @@ eth_igc_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                        return i;
                }
 
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
                        rte_errno = -ret;
@@ -1896,10 +1880,10 @@ igc_tx_queue_release(struct igc_tx_queue *txq)
        rte_free(txq);
 }
 
-void eth_igc_tx_queue_release(void *txq)
+void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       if (txq)
-               igc_tx_queue_release(txq);
+       if (dev->data->tx_queues[qid])
+               igc_tx_queue_release(dev->data->tx_queues[qid]);
 }
 
 static void
@@ -2268,12 +2252,10 @@ eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
 
        reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
        if (on) {
-               /* If vlan been stripped off, the CRC is meaningless. */
-               reg_val |= IGC_DVMOLR_STRVLAN | IGC_DVMOLR_STRCRC;
+               reg_val |= IGC_DVMOLR_STRVLAN;
                rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
        } else {
-               reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN |
-                               IGC_DVMOLR_STRCRC);
+               reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
                rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
        }