net/mlx5: improve stack usage during link update
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 08cc814..53a23ab 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
 #include <linux/sockios.h>
 #include <linux/version.h>
 #include <fcntl.h>
+#include <stdalign.h>
 
-/* DPDK headers don't like -pedantic. */
-#ifdef PEDANTIC
-#pragma GCC diagnostic ignored "-Wpedantic"
-#endif
 #include <rte_atomic.h>
 #include <rte_ethdev.h>
 #include <rte_mbuf.h>
@@ -61,9 +58,6 @@
 #include <rte_interrupts.h>
 #include <rte_alarm.h>
 #include <rte_malloc.h>
-#ifdef PEDANTIC
-#pragma GCC diagnostic error "-Wpedantic"
-#endif
 
 #include "mlx5.h"
 #include "mlx5_rxtx.h"
@@ -119,7 +113,6 @@ struct ethtool_link_settings {
 #define ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT 38
 #define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39
 #endif
-#define ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 (SCHAR_MAX)
 
 /**
  * Return private structure associated with an Ethernet device.
@@ -133,12 +126,7 @@ struct ethtool_link_settings {
 struct priv *
 mlx5_get_priv(struct rte_eth_dev *dev)
 {
-       struct mlx5_secondary_data *sd;
-
-       if (!mlx5_is_secondary())
-               return dev->data->dev_private;
-       sd = &mlx5_secondary_data[dev->data->port_id];
-       return sd->data.dev_private;
+       return dev->data->dev_private;
 }
 
 /**
@@ -150,7 +138,7 @@ mlx5_get_priv(struct rte_eth_dev *dev)
 inline int
 mlx5_is_secondary(void)
 {
-       return rte_eal_process_type() != RTE_PROC_PRIMARY;
+       return rte_eal_process_type() == RTE_PROC_SECONDARY;
 }
 
 /**
@@ -816,12 +804,7 @@ static int
 mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
 {
        struct priv *priv = mlx5_get_priv(dev);
-       __extension__ struct {
-               struct ethtool_link_settings edata;
-               uint32_t link_mode_data[3 *
-                                       ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32];
-       } ecmd;
-
+       struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
        struct ifreq ifr;
        struct rte_eth_link dev_link;
        uint64_t sc;
@@ -834,23 +817,29 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
        memset(&dev_link, 0, sizeof(dev_link));
        dev_link.link_status = ((ifr.ifr_flags & IFF_UP) &&
                                (ifr.ifr_flags & IFF_RUNNING));
-       memset(&ecmd, 0, sizeof(ecmd));
-       ecmd.edata.cmd = ETHTOOL_GLINKSETTINGS;
-       ifr.ifr_data = (void *)&ecmd;
+       ifr.ifr_data = (void *)&gcmd;
        if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
                DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
                      strerror(errno));
                return -1;
        }
-       ecmd.edata.link_mode_masks_nwords = -ecmd.edata.link_mode_masks_nwords;
+       gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
+
+       alignas(struct ethtool_link_settings)
+       uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
+                    sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
+       struct ethtool_link_settings *ecmd = (void *)data;
+
+       *ecmd = gcmd;
+       ifr.ifr_data = (void *)ecmd;
        if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) {
                DEBUG("ioctl(SIOCETHTOOL, ETHTOOL_GLINKSETTINGS) failed: %s",
                      strerror(errno));
                return -1;
        }
-       dev_link.link_speed = ecmd.edata.speed;
-       sc = ecmd.edata.link_mode_masks[0] |
-               ((uint64_t)ecmd.edata.link_mode_masks[1] << 32);
+       dev_link.link_speed = ecmd->speed;
+       sc = ecmd->link_mode_masks[0] |
+               ((uint64_t)ecmd->link_mode_masks[1] << 32);
        priv->link_speed_capa = 0;
        if (sc & ETHTOOL_LINK_MODE_Autoneg_BIT)
                priv->link_speed_capa |= ETH_LINK_SPEED_AUTONEG;
@@ -886,7 +875,7 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
                  ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT |
                  ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT))
                priv->link_speed_capa |= ETH_LINK_SPEED_100G;
-       dev_link.link_duplex = ((ecmd.edata.duplex == DUPLEX_HALF) ?
+       dev_link.link_duplex = ((ecmd->duplex == DUPLEX_HALF) ?
                                ETH_LINK_HALF_DUPLEX : ETH_LINK_FULL_DUPLEX);
        dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                                  ETH_LINK_SPEED_FIXED);
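
The removed ETHTOOL_LINK_MODE_MASK_MAX_KERNEL_NU32 constant sized a worst-case
stack buffer of SCHAR_MAX 32-bit words per bitmap. The new code instead asks the
kernel how many words it actually needs: a first ETHTOOL_GLINKSETTINGS call with
link_mode_masks_nwords left at zero makes the kernel report the required word
count as a negative value, and negating it gives the size of the variable-length
buffer used for the second, real query. A minimal standalone sketch of this
handshake (hypothetical helper, not code from the patch):

#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int
query_link_settings(int fd, const char *ifname)
{
	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
	/* Pass 1: link_mode_masks_nwords is 0, so the kernel refuses the
	 * request and reports the required word count negated. */
	ifr.ifr_data = (void *)&gcmd;
	if (ioctl(fd, SIOCETHTOOL, &ifr))
		return -1;
	gcmd.link_mode_masks_nwords = -gcmd.link_mode_masks_nwords;
	/* Pass 2: size the buffer for exactly three bitmaps (supported,
	 * advertising, lp_advertising) of the length the kernel asked for. */
	alignas(struct ethtool_link_settings)
	uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
		     sizeof(uint32_t) * gcmd.link_mode_masks_nwords * 3];
	struct ethtool_link_settings *ecmd = (void *)data;

	*ecmd = gcmd;
	ifr.ifr_data = (void *)ecmd;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}
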
@@ -924,12 +913,6 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 /**
  * DPDK callback to change the MTU.
  *
- * Setting the MTU affects hardware MRU (packets larger than the MTU cannot be
- * received). Use this as a hint to enable/disable scattered packets support
- * and improve performance when not needed.
- * Since failure is not an option, reconfiguring queues on the fly is not
- * recommended.
- *
  * @param dev
  *   Pointer to Ethernet device structure.
  * @param in_mtu
@@ -942,122 +925,33 @@ int
 mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct priv *priv = dev->data->dev_private;
+       uint16_t kern_mtu;
        int ret = 0;
-       unsigned int i;
-       unsigned int max_frame_len;
-       int rehash;
-       int restart = priv->started;
 
        if (mlx5_is_secondary())
                return -E_RTE_SECONDARY;
 
        priv_lock(priv);
+       ret = priv_get_mtu(priv, &kern_mtu);
+       if (ret)
+               goto out;
        /* Set kernel interface MTU first. */
-       if (priv_set_mtu(priv, mtu)) {
-               ret = errno;
-               WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
-                    strerror(ret));
+       ret = priv_set_mtu(priv, mtu);
+       if (ret)
+               goto out;
+       ret = priv_get_mtu(priv, &kern_mtu);
+       if (ret)
                goto out;
-       } else
+       if (kern_mtu == mtu) {
+               priv->mtu = mtu;
                DEBUG("adapter port %u MTU set to %u", priv->port, mtu);
-       /* Temporarily replace RX handler with a fake one, assuming it has not
-        * been copied elsewhere. */
-       dev->rx_pkt_burst = removed_rx_burst;
-       /* Make sure everyone has left dev->rx_pkt_burst() and uses
-        * removed_rx_burst() instead. */
-       rte_wmb();
-       usleep(1000);
-       /* MTU does not include header and CRC. */
-       max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN;
-       /* Check if at least one queue is going to need a SGE update. */
-       for (i = 0; i != priv->rxqs_n; ++i) {
-               struct rxq *rxq = (*priv->rxqs)[i];
-               unsigned int mb_len;
-               unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len;
-               unsigned int sges_n;
-
-               if (rxq == NULL)
-                       continue;
-               mb_len = rte_pktmbuf_data_room_size(rxq->mp);
-               assert(mb_len >= RTE_PKTMBUF_HEADROOM);
-               /*
-                * Determine the number of SGEs needed for a full packet
-                * and round it to the next power of two.
-                */
-               sges_n = log2above((size / mb_len) + !!(size % mb_len));
-               if (sges_n != rxq->sges_n)
-                       break;
-       }
-       /*
-        * If all queues have the right number of SGEs, a simple rehash
-        * of their buffers is enough, otherwise SGE information can only
-        * be updated in a queue by recreating it. All resources that depend
-        * on queues (flows, indirection tables) must be recreated as well in
-        * that case.
-        */
-       rehash = (i == priv->rxqs_n);
-       if (!rehash) {
-               /* Clean up everything as with mlx5_dev_stop(). */
-               priv_special_flow_disable_all(priv);
-               priv_mac_addrs_disable(priv);
-               priv_destroy_hash_rxqs(priv);
-               priv_fdir_disable(priv);
-               priv_dev_interrupt_handler_uninstall(priv, dev);
-       }
-recover:
-       /* Reconfigure each RX queue. */
-       for (i = 0; (i != priv->rxqs_n); ++i) {
-               struct rxq *rxq = (*priv->rxqs)[i];
-               struct rxq_ctrl *rxq_ctrl =
-                       container_of(rxq, struct rxq_ctrl, rxq);
-               unsigned int mb_len;
-               unsigned int tmp;
-
-               if (rxq == NULL)
-                       continue;
-               mb_len = rte_pktmbuf_data_room_size(rxq->mp);
-               assert(mb_len >= RTE_PKTMBUF_HEADROOM);
-               /* Provide new values to rxq_setup(). */
-               dev->data->dev_conf.rxmode.jumbo_frame =
-                       (max_frame_len > ETHER_MAX_LEN);
-               dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len;
-               if (rehash)
-                       ret = rxq_rehash(dev, rxq_ctrl);
-               else
-                       ret = rxq_ctrl_setup(dev, rxq_ctrl, 1 << rxq->elts_n,
-                                            rxq_ctrl->socket, NULL, rxq->mp);
-               if (!ret)
-                       continue;
-               /* Attempt to roll back in case of error. */
-               tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM;
-               if (max_frame_len != tmp) {
-                       max_frame_len = tmp;
-                       goto recover;
-               }
-               /* Double fault, disable RX. */
-               break;
-       }
-       /* Mimic mlx5_dev_start(). */
-       if (ret) {
-               ERROR("unable to reconfigure RX queues, RX disabled");
-       } else if (restart &&
-                  !rehash &&
-                  !priv_create_hash_rxqs(priv) &&
-                  !priv_rehash_flows(priv)) {
-               if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE)
-                       priv_fdir_enable(priv);
-               priv_dev_interrupt_handler_install(priv, dev);
        }
-       priv->mtu = mtu;
-       /* Burst functions can now be called again. */
-       rte_wmb();
-       /*
-        * Use a safe RX burst function in case of error, otherwise select RX
-        * burst function again.
-        */
-       if (!ret)
-               priv_select_rx_function(priv);
+       priv_unlock(priv);
+       return 0;
 out:
+       ret = errno;
+       WARN("cannot set port %u MTU to %u: %s", priv->port, mtu,
+            strerror(ret));
        priv_unlock(priv);
        assert(ret >= 0);
        return -ret;
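
The rewritten mlx5_dev_set_mtu() no longer reconfigures RX queues in place: it
sets the kernel interface MTU, reads it back, and caches priv->mtu only once the
kernel confirms the requested value; any failure is reported through the out:
path. The priv_set_mtu()/priv_get_mtu() helpers are assumed to wrap the kernel's
per-interface MTU (sysfs attribute or SIOCSIFMTU/SIOCGIFMTU); purely as an
illustration of the same set-then-verify pattern, a minimal sketch on top of the
standard ioctls:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>

static int
set_mtu_verified(int fd, const char *ifname, uint16_t mtu)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name) - 1);
	ifr.ifr_mtu = mtu;
	if (ioctl(fd, SIOCSIFMTU, &ifr))
		return -1;
	/* Read the value back: the kernel may clamp or reject it. */
	if (ioctl(fd, SIOCGIFMTU, &ifr))
		return -1;
	return (ifr.ifr_mtu == mtu) ? 0 : -1;
}
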
@@ -1437,163 +1331,6 @@ mlx5_set_link_up(struct rte_eth_dev *dev)
        return err;
 }
 
-/**
- * Configure secondary process queues from a private data pointer (primary
- * or secondary) and update burst callbacks. Can take place only once.
- *
- * All queues must have been previously created by the primary process to
- * avoid undefined behavior.
- *
- * @param priv
- *   Private data pointer from either primary or secondary process.
- *
- * @return
- *   Private data pointer from secondary process, NULL in case of error.
- */
-struct priv *
-mlx5_secondary_data_setup(struct priv *priv)
-{
-       unsigned int port_id = 0;
-       struct mlx5_secondary_data *sd;
-       void **tx_queues;
-       void **rx_queues;
-       unsigned int nb_tx_queues;
-       unsigned int nb_rx_queues;
-       unsigned int i;
-
-       /* priv must be valid at this point. */
-       assert(priv != NULL);
-       /* priv->dev must also be valid but may point to local memory from
-        * another process, possibly with the same address and must not
-        * be dereferenced yet. */
-       assert(priv->dev != NULL);
-       /* Determine port ID by finding out where priv comes from. */
-       while (1) {
-               sd = &mlx5_secondary_data[port_id];
-               rte_spinlock_lock(&sd->lock);
-               /* Primary process? */
-               if (sd->primary_priv == priv)
-                       break;
-               /* Secondary process? */
-               if (sd->data.dev_private == priv)
-                       break;
-               rte_spinlock_unlock(&sd->lock);
-               if (++port_id == RTE_DIM(mlx5_secondary_data))
-                       port_id = 0;
-       }
-       /* Switch to secondary private structure. If private data has already
-        * been updated by another thread, there is nothing else to do. */
-       priv = sd->data.dev_private;
-       if (priv->dev->data == &sd->data)
-               goto end;
-       /* Sanity checks. Secondary private structure is supposed to point
-        * to local eth_dev, itself still pointing to the shared device data
-        * structure allocated by the primary process. */
-       assert(sd->shared_dev_data != &sd->data);
-       assert(sd->data.nb_tx_queues == 0);
-       assert(sd->data.tx_queues == NULL);
-       assert(sd->data.nb_rx_queues == 0);
-       assert(sd->data.rx_queues == NULL);
-       assert(priv != sd->primary_priv);
-       assert(priv->dev->data == sd->shared_dev_data);
-       assert(priv->txqs_n == 0);
-       assert(priv->txqs == NULL);
-       assert(priv->rxqs_n == 0);
-       assert(priv->rxqs == NULL);
-       nb_tx_queues = sd->shared_dev_data->nb_tx_queues;
-       nb_rx_queues = sd->shared_dev_data->nb_rx_queues;
-       /* Allocate local storage for queues. */
-       tx_queues = rte_zmalloc("secondary ethdev->tx_queues",
-                               sizeof(sd->data.tx_queues[0]) * nb_tx_queues,
-                               RTE_CACHE_LINE_SIZE);
-       rx_queues = rte_zmalloc("secondary ethdev->rx_queues",
-                               sizeof(sd->data.rx_queues[0]) * nb_rx_queues,
-                               RTE_CACHE_LINE_SIZE);
-       if (tx_queues == NULL || rx_queues == NULL)
-               goto error;
-       /* Lock to prevent control operations during setup. */
-       priv_lock(priv);
-       /* TX queues. */
-       for (i = 0; i != nb_tx_queues; ++i) {
-               struct txq *primary_txq = (*sd->primary_priv->txqs)[i];
-               struct txq_ctrl *primary_txq_ctrl;
-               struct txq_ctrl *txq_ctrl;
-
-               if (primary_txq == NULL)
-                       continue;
-               primary_txq_ctrl = container_of(primary_txq,
-                                               struct txq_ctrl, txq);
-               txq_ctrl = rte_calloc_socket("TXQ", 1, sizeof(*txq_ctrl) +
-                                            (1 << primary_txq->elts_n) *
-                                            sizeof(struct rte_mbuf *), 0,
-                                            primary_txq_ctrl->socket);
-               if (txq_ctrl != NULL) {
-                       if (txq_ctrl_setup(priv->dev,
-                                          txq_ctrl,
-                                          1 << primary_txq->elts_n,
-                                          primary_txq_ctrl->socket,
-                                          NULL) == 0) {
-                               txq_ctrl->txq.stats.idx =
-                                       primary_txq->stats.idx;
-                               tx_queues[i] = &txq_ctrl->txq;
-                               continue;
-                       }
-                       rte_free(txq_ctrl);
-               }
-               while (i) {
-                       txq_ctrl = tx_queues[--i];
-                       txq_cleanup(txq_ctrl);
-                       rte_free(txq_ctrl);
-               }
-               goto error;
-       }
-       /* RX queues. */
-       for (i = 0; i != nb_rx_queues; ++i) {
-               struct rxq_ctrl *primary_rxq =
-                       container_of((*sd->primary_priv->rxqs)[i],
-                                    struct rxq_ctrl, rxq);
-
-               if (primary_rxq == NULL)
-                       continue;
-               /* Not supported yet. */
-               rx_queues[i] = NULL;
-       }
-       /* Update everything. */
-       priv->txqs = (void *)tx_queues;
-       priv->txqs_n = nb_tx_queues;
-       priv->rxqs = (void *)rx_queues;
-       priv->rxqs_n = nb_rx_queues;
-       sd->data.rx_queues = rx_queues;
-       sd->data.tx_queues = tx_queues;
-       sd->data.nb_rx_queues = nb_rx_queues;
-       sd->data.nb_tx_queues = nb_tx_queues;
-       sd->data.dev_link = sd->shared_dev_data->dev_link;
-       sd->data.mtu = sd->shared_dev_data->mtu;
-       memcpy(sd->data.rx_queue_state, sd->shared_dev_data->rx_queue_state,
-              sizeof(sd->data.rx_queue_state));
-       memcpy(sd->data.tx_queue_state, sd->shared_dev_data->tx_queue_state,
-              sizeof(sd->data.tx_queue_state));
-       sd->data.dev_flags = sd->shared_dev_data->dev_flags;
-       /* Use local data from now on. */
-       rte_mb();
-       priv->dev->data = &sd->data;
-       rte_mb();
-       priv_select_tx_function(priv);
-       priv_select_rx_function(priv);
-       priv_unlock(priv);
-end:
-       /* More sanity checks. */
-       assert(priv->dev->data == &sd->data);
-       rte_spinlock_unlock(&sd->lock);
-       return priv;
-error:
-       priv_unlock(priv);
-       rte_free(tx_queues);
-       rte_free(rx_queues);
-       rte_spinlock_unlock(&sd->lock);
-       return NULL;
-}
-
 /**
  * Configure the TX function to use.
  *
@@ -1635,7 +1372,6 @@ void
 priv_select_rx_function(struct priv *priv)
 {
        if (priv_check_vec_rx_support(priv) > 0) {
-               priv_prep_vec_rx_function(priv);
                priv->dev->rx_pkt_burst = mlx5_rx_burst_vec;
                DEBUG("selected RX vectorized function");
        } else {