ethdev: fix max Rx packet length

diff --git a/drivers/net/igc/igc_ethdev.c b/drivers/net/igc/igc_ethdev.c
index 15b63d1..2b1f2f5 100644
--- a/drivers/net/igc/igc_ethdev.c
+++ b/drivers/net/igc/igc_ethdev.c
@@ -8,33 +8,35 @@
 #include <rte_string_fns.h>
 #include <rte_pci.h>
 #include <rte_bus_pci.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
 #include <rte_malloc.h>
 #include <rte_alarm.h>
 
 #include "igc_logs.h"
 #include "igc_txrx.h"
+#include "igc_filter.h"
+#include "igc_flow.h"
 
 #define IGC_INTEL_VENDOR_ID            0x8086
 
-/*
- * The overhead from MTU to max frame size.
- * Considering VLAN so tag needs to be counted.
- */
-#define IGC_ETH_OVERHEAD               (RTE_ETHER_HDR_LEN + \
-                                       RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
-
 #define IGC_FC_PAUSE_TIME              0x0680
 #define IGC_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
 #define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
 /* External VLAN Enable bit mask */
 #define IGC_CTRL_EXT_EXT_VLAN          (1u << 26)
 
+/* Speed select */
+#define IGC_CTRL_SPEED_MASK            (7u << 8)
+#define IGC_CTRL_SPEED_2500            (6u << 8)
+
 /* External VLAN Ether Type bit mask and shift */
 #define IGC_VET_EXT                    0xFFFF0000
 #define IGC_VET_EXT_SHIFT              16
 
+/* Force EEE Auto-negotiation */
+#define IGC_EEER_EEE_FRC_AN            (1u << 28)
+
 /* Per Queue Good Packets Received Count */
 #define IGC_PQGPRC(idx)                (0x10010 + 0x100 * (idx))
 /* Per Queue Good Octets Received Count */
@@ -170,11 +172,11 @@ static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
 
 static int eth_igc_configure(struct rte_eth_dev *dev);
 static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
-static void eth_igc_stop(struct rte_eth_dev *dev);
+static int eth_igc_stop(struct rte_eth_dev *dev);
 static int eth_igc_start(struct rte_eth_dev *dev);
 static int eth_igc_set_link_up(struct rte_eth_dev *dev);
 static int eth_igc_set_link_down(struct rte_eth_dev *dev);
-static void eth_igc_close(struct rte_eth_dev *dev);
+static int eth_igc_close(struct rte_eth_dev *dev);
 static int eth_igc_reset(struct rte_eth_dev *dev);
 static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
 static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
@@ -207,7 +209,7 @@ static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,
                                struct rte_eth_xstat_name *xstats_names,
                                unsigned int size);
 static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
-               struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+               const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
                unsigned int limit);
 static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
 static int
@@ -263,10 +265,6 @@ static const struct eth_dev_ops eth_igc_ops = {
 
        .rx_queue_setup         = eth_igc_rx_queue_setup,
        .rx_queue_release       = eth_igc_rx_queue_release,
-       .rx_queue_count         = eth_igc_rx_queue_count,
-       .rx_descriptor_done     = eth_igc_rx_descriptor_done,
-       .rx_descriptor_status   = eth_igc_rx_descriptor_status,
-       .tx_descriptor_status   = eth_igc_tx_descriptor_status,
        .tx_queue_setup         = eth_igc_tx_queue_setup,
        .tx_queue_release       = eth_igc_tx_queue_release,
        .tx_done_cleanup        = eth_igc_tx_done_cleanup,
@@ -292,6 +290,7 @@ static const struct eth_dev_ops eth_igc_ops = {
        .vlan_offload_set       = eth_igc_vlan_offload_set,
        .vlan_tpid_set          = eth_igc_vlan_tpid_set,
        .vlan_strip_queue_set   = eth_igc_vlan_strip_queue_set,
+       .flow_ops_get           = eth_igc_flow_ops_get,
 };
 
 /*
@@ -335,6 +334,9 @@ eth_igc_configure(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
+       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
        ret  = igc_check_mq_mode(dev);
        if (ret != 0)
                return ret;
@@ -534,8 +536,7 @@ eth_igc_interrupt_action(struct rte_eth_dev *dev)
                                pci_dev->addr.bus,
                                pci_dev->addr.devid,
                                pci_dev->addr.function);
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-                               NULL);
+               rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        }
 }
 
@@ -602,7 +603,7 @@ eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
  *  This routine disables all traffic on the adapter by issuing a
  *  global reset on the MAC.
  */
-static void
+static int
 eth_igc_stop(struct rte_eth_dev *dev)
 {
        struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
@@ -611,6 +612,7 @@ eth_igc_stop(struct rte_eth_dev *dev)
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        struct rte_eth_link link;
 
+       dev->data->dev_started = 0;
        adapter->stopped = 1;
 
        /* disable receive and transmit */
@@ -635,6 +637,9 @@ eth_igc_stop(struct rte_eth_dev *dev)
        /* disable all wake up */
        IGC_WRITE_REG(hw, IGC_WUC, 0);
 
+       /* disable checking EEE operation in MAC loopback mode */
+       igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
+
        /* Set bit for Go Link disconnect */
        igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
                        IGC_82580_PM_GO_LINKD);
@@ -660,6 +665,8 @@ eth_igc_stop(struct rte_eth_dev *dev)
                rte_free(intr_handle->intr_vec);
                intr_handle->intr_vec = NULL;
        }
+
+       return 0;
 }
 
 /*
@@ -975,15 +982,20 @@ eth_igc_start(struct rte_eth_dev *dev)
                hw->mac.autoneg = 1;
        } else {
                int num_speeds = 0;
-               bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
 
-               /* Reset */
+               if (*speeds & ETH_LINK_SPEED_FIXED) {
+                       PMD_DRV_LOG(ERR,
+                                   "Force speed mode currently not supported");
+                       igc_dev_clear_queues(dev);
+                       return -EINVAL;
+               }
+
                hw->phy.autoneg_advertised = 0;
+               hw->mac.autoneg = 1;
 
                if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
                                ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
-                               ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
-                               ETH_LINK_SPEED_FIXED)) {
+                               ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G)) {
                        num_speeds = -1;
                        goto error_invalid_config;
                }
@@ -1011,19 +1023,8 @@ eth_igc_start(struct rte_eth_dev *dev)
                        hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
                        num_speeds++;
                }
-               if (num_speeds == 0 || (!autoneg && num_speeds > 1))
+               if (num_speeds == 0)
                        goto error_invalid_config;
-
-               /* Set/reset the mac.autoneg based on the link speed,
-                * fixed or not
-                */
-               if (!autoneg) {
-                       hw->mac.autoneg = 0;
-                       hw->mac.forced_speed_duplex =
-                                       hw->phy.autoneg_advertised;
-               } else {
-                       hw->mac.autoneg = 1;
-               }
        }
 
        igc_setup_link(hw);
@@ -1060,6 +1061,19 @@ eth_igc_start(struct rte_eth_dev *dev)
        eth_igc_rxtx_control(dev, true);
        eth_igc_link_update(dev, 0);
 
+       /* configure MAC-loopback mode */
+       if (dev->data->dev_conf.lpbk_mode == 1) {
+               uint32_t reg_val;
+
+               reg_val = IGC_READ_REG(hw, IGC_CTRL);
+               reg_val &= ~IGC_CTRL_SPEED_MASK;
+               reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
+                       IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
+               IGC_WRITE_REG(hw, IGC_CTRL, reg_val);
+
+               igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
+       }
+
        return 0;
 
 error_invalid_config:
@@ -1132,19 +1146,19 @@ igc_dev_free_queues(struct rte_eth_dev *dev)
        uint16_t i;
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
-               eth_igc_rx_queue_release(dev->data->rx_queues[i]);
+               eth_igc_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
-               eth_igc_tx_queue_release(dev->data->tx_queues[i]);
+               eth_igc_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
 }
 
-static void
+static int
 eth_igc_close(struct rte_eth_dev *dev)
 {
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
@@ -1152,11 +1166,17 @@ eth_igc_close(struct rte_eth_dev *dev)
        struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
        struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
        int retry = 0;
+       int ret = 0;
 
        PMD_INIT_FUNC_TRACE();
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
 
        if (!adapter->stopped)
-               eth_igc_stop(dev);
+               ret = eth_igc_stop(dev);
+
+       igc_flow_flush(dev, NULL);
+       igc_clear_all_filter(dev);
 
        igc_intr_other_disable(dev);
        do {
@@ -1175,6 +1195,8 @@ eth_igc_close(struct rte_eth_dev *dev)
 
        /* Reset any pending lock */
        igc_reset_swfw_lock(hw);
+
+       return ret;
 }
 
 static void
@@ -1198,6 +1220,9 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
        dev->dev_ops = &eth_igc_ops;
+       dev->rx_queue_count = eth_igc_rx_queue_count;
+       dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
+       dev->tx_descriptor_status = eth_igc_tx_descriptor_status;
 
        /*
         * for secondary processes, we don't initialize any further as primary
@@ -1208,6 +1233,7 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
                return 0;
 
        rte_eth_copy_pci_info(dev, pci_dev);
+       dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        hw->back = pci_dev;
        hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
@@ -1293,11 +1319,6 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
                goto err_late;
        }
 
-       /* Pass the information to the rte_eth_dev_close() that it should also
-        * release the private port resources.
-        */
-       dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
-
        hw->mac.get_link_status = 1;
        igc->stopped = 0;
 
@@ -1325,6 +1346,8 @@ eth_igc_dev_init(struct rte_eth_dev *dev)
                igc->rxq_stats_map[i] = -1;
        }
 
+       igc_flow_init(dev);
+       igc_clear_all_filter(dev);
        return 0;
 
 err_late:
@@ -1336,10 +1359,6 @@ static int
 eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
 {
        PMD_INIT_FUNC_TRACE();
-
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-               return 0;
-
        eth_igc_close(eth_dev);
        return 0;
 }
@@ -1443,9 +1462,11 @@ eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
                                 fw.eep_build);
                }
        }
+       if (ret < 0)
+               return -EINVAL;
 
        ret += 1; /* add the size of '\0' */
-       if (fw_size < (u32)ret)
+       if (fw_size < (size_t)ret)
                return ret;
        else
                return 0;
@@ -1560,32 +1581,28 @@ eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
 
        /*
-        * refuse mtu that requires the support of scattered packets when
-        * this feature has not been enabled before.
+        * If device is started, refuse mtu that requires the support of
+        * scattered packets when this feature has not been enabled before.
         */
-       if (!dev->data->scattered_rx &&
-           frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+       if (dev->data->dev_started && !dev->data->scattered_rx &&
+           frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+               PMD_INIT_LOG(ERR, "Stop port first.");
                return -EINVAL;
+       }
 
        rctl = IGC_READ_REG(hw, IGC_RCTL);
 
        /* switch to jumbo mode if needed */
        if (mtu > RTE_ETHER_MTU) {
-               dev->data->dev_conf.rxmode.offloads |=
-                       DEV_RX_OFFLOAD_JUMBO_FRAME;
+               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
                rctl |= IGC_RCTL_LPE;
        } else {
-               dev->data->dev_conf.rxmode.offloads &=
-                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+               dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
                rctl &= ~IGC_RCTL_LPE;
        }
        IGC_WRITE_REG(hw, IGC_RCTL, rctl);
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
-       IGC_WRITE_REG(hw, IGC_RLPML,
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
 
        return 0;
 }
@@ -1869,8 +1886,7 @@ eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
 
        /* Rx Errors */
        rte_stats->imissed = stats->mpc;
-       rte_stats->ierrors = stats->crcerrs +
-                       stats->rlec + stats->ruc + stats->roc +
+       rte_stats->ierrors = stats->crcerrs + stats->rlec +
                        stats->rxerrc + stats->algnerrc;
 
        /* Tx Errors */
@@ -1983,7 +1999,7 @@ eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 
 static int
 eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
-               struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
+               const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
                unsigned int limit)
 {
        unsigned int i;
@@ -2235,6 +2251,8 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+
        /* set redirection table */
        for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
                union igc_rss_reta_reg reta, reg;
@@ -2247,7 +2265,8 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
                                IGC_RSS_RDT_REG_SIZE_MASK);
 
                /* if no need to update the register */
-               if (!mask)
+               if (!mask ||
+                   shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
                        continue;
 
                /* check mask whether need to read the register value first */
@@ -2258,6 +2277,7 @@ eth_igc_rss_reta_update(struct rte_eth_dev *dev,
                                        IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
 
                /* update the register */
+               RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
                for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
                        if (mask & (1u << j))
                                reta.bytes[j] =
@@ -2287,6 +2307,8 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
+       RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);
+
        /* read redirection table */
        for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
                union igc_rss_reta_reg reta;
@@ -2299,10 +2321,12 @@ eth_igc_rss_reta_query(struct rte_eth_dev *dev,
                                IGC_RSS_RDT_REG_SIZE_MASK);
 
                /* if no need to read register */
-               if (!mask)
+               if (!mask ||
+                   shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
                        continue;
 
                /* read register and get the queue index */
+               RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
                reta.dword = IGC_READ_REG_LE_VALUE(hw,
                                IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
                for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
@@ -2448,6 +2472,7 @@ static int
 igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 {
        struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+       uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
        uint32_t ctrl_ext;
 
        ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
@@ -2456,23 +2481,14 @@ igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
        if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
                return 0;
 
-       if ((dev->data->dev_conf.rxmode.offloads &
-                       DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
-               goto write_ext_vlan;
-
        /* Update maximum packet length */
-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len <
-               RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
+       if (frame_size < RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
                PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len,
-                       VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
+                       frame_size, VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
                return -EINVAL;
        }
-       dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE;
-       IGC_WRITE_REG(hw, IGC_RLPML,
-               dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       IGC_WRITE_REG(hw, IGC_RLPML, frame_size - VLAN_TAG_SIZE);
 
-write_ext_vlan:
        IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
        return 0;
 }
@@ -2481,6 +2497,7 @@ static int
 igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 {
        struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+       uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
        uint32_t ctrl_ext;
 
        ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
@@ -2489,23 +2506,14 @@ igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
        if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
                return 0;
 
-       if ((dev->data->dev_conf.rxmode.offloads &
-                       DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
-               goto write_ext_vlan;
-
        /* Update maximum packet length */
-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-               MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) {
+       if (frame_size > MAX_RX_JUMBO_FRAME_SIZE) {
                PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                       VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE);
+                       frame_size, MAX_RX_JUMBO_FRAME_SIZE);
                return -EINVAL;
        }
-       dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE;
-       IGC_WRITE_REG(hw, IGC_RLPML,
-               dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
 
-write_ext_vlan:
        IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
        return 0;
 }