common/mlx5: share protection domain object
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f5fab0f..cf4fbd3 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -6,15 +6,17 @@
 #include <unistd.h>
 
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_interrupts.h>
 #include <rte_alarm.h>
+#include <rte_cycles.h>
 
 #include <mlx5_malloc.h>
 
 #include "mlx5.h"
 #include "mlx5_mr.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
 #include "mlx5_utils.h"
 #include "rte_pmd_mlx5.h"
 
@@ -77,6 +79,7 @@ mlx5_txq_start(struct rte_eth_dev *dev)
                }
                if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
                        size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
+
                        txq_data->fcqs = mlx5_malloc(flags, size,
                                                     RTE_CACHE_LINE_SIZE,
                                                     txq_ctrl->socket);
@@ -102,6 +105,60 @@ error:
        return -rte_errno;
 }
 
+/**
+ * Translate the chunk address to MR key in order to put it into the cache.
+ */
+static void
+mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
+                            struct rte_mempool_memhdr *memhdr,
+                            unsigned int idx)
+{
+       struct mlx5_rxq_data *rxq = opaque;
+
+       RTE_SET_USED(mp);
+       RTE_SET_USED(idx);
+       mlx5_rx_addr2mr(rxq, (uintptr_t)memhdr->addr);
+}
+
+/**
+ * Register Rx queue mempools and fill the Rx queue cache.
+ * This function tolerates repeated mempool registration.
+ *
+ * @param[in] rxq_ctrl
+ *   Rx queue control data.
+ *
+ * @return
+ *   0 on success, (-1) on failure and rte_errno is set.
+ */
+static int
+mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+       struct mlx5_priv *priv = rxq_ctrl->priv;
+       struct rte_mempool *mp;
+       uint32_t s;
+       int ret = 0;
+
+       mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
+       /* MPRQ mempool is registered on creation, just fill the cache. */
+       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+               rte_mempool_mem_iter(rxq_ctrl->rxq.mprq_mp,
+                                    mlx5_rxq_mempool_register_cb,
+                                    &rxq_ctrl->rxq);
+               return 0;
+       }
+       for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
+               mp = rxq_ctrl->rxq.rxseg[s].mp;
+               ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
+                                              priv->sh->cdev->pd, mp,
+                                              &priv->mp_id);
+               if (ret < 0 && rte_errno != EEXIST)
+                       return ret;
+               rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
+                                    &rxq_ctrl->rxq);
+       }
+       return 0;
+}
+
 /**
  * Stop traffic on Rx queues.
  *
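The mlx5_rxq_mempool_register_cb() added above follows the generic
rte_mempool_mem_iter() contract: the iterator invokes the callback once for
every memory chunk backing the pool and returns the number of chunks visited.
A minimal standalone sketch of the same pattern using only the public mempool
API (chunk_counter_cb and count_mempool_chunks are illustrative names, not
part of this patch):

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_mempool.h>

    /* Matches rte_mempool_mem_cb_t; invoked once per memory chunk. */
    static void
    chunk_counter_cb(struct rte_mempool *mp, void *opaque,
                     struct rte_mempool_memhdr *memhdr, unsigned int idx)
    {
            unsigned int *chunks = opaque;

            RTE_SET_USED(mp);
            RTE_SET_USED(idx);
            printf("chunk at %p, len %zu\n", memhdr->addr, memhdr->len);
            (*chunks)++;
    }

    static unsigned int
    count_mempool_chunks(struct rte_mempool *mp)
    {
            unsigned int chunks = 0;

            /* Walks every rte_mempool_memhdr registered for the pool. */
            rte_mempool_mem_iter(mp, chunk_counter_cb, &chunks);
            return chunks;
    }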
@@ -149,18 +206,13 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
                if (!rxq_ctrl)
                        continue;
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-                       /* Pre-register Rx mempools. */
-                       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
-                               mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
-                                                 rxq_ctrl->rxq.mprq_mp);
-                       } else {
-                               uint32_t s;
-
-                               for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
-                                       mlx5_mr_update_mp
-                                               (dev, &rxq_ctrl->rxq.mr_ctrl,
-                                               rxq_ctrl->rxq.rxseg[s].mp);
-                       }
+                       /*
+                        * Pre-register the mempools. Regardless of whether
+                        * implicit registration is enabled, Rx mempool
+                        * destruction is tracked to free MRs.
+                        */
+                       if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
+                               goto error;
                        ret = rxq_alloc_elts(rxq_ctrl);
                        if (ret)
                                goto error;
@@ -225,12 +277,11 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                txq_ctrl = mlx5_txq_get(dev, i);
                if (!txq_ctrl)
                        continue;
-               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+                   txq_ctrl->hairpin_conf.peers[0].port != self_port) {
                        mlx5_txq_release(dev, i);
                        continue;
                }
-               if (txq_ctrl->hairpin_conf.peers[0].port != self_port)
-                       continue;
                if (txq_ctrl->hairpin_conf.manual_bind) {
                        mlx5_txq_release(dev, i);
                        return 0;
@@ -244,13 +295,12 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                txq_ctrl = mlx5_txq_get(dev, i);
                if (!txq_ctrl)
                        continue;
-               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+               /* Skip hairpin queues with other peer ports. */
+               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+                   txq_ctrl->hairpin_conf.peers[0].port != self_port) {
                        mlx5_txq_release(dev, i);
                        continue;
                }
-               /* Skip hairpin queues with other peer ports. */
-               if (txq_ctrl->hairpin_conf.peers[0].port != self_port)
-                       continue;
                if (!txq_ctrl->obj) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u no txq object found: %d",
@@ -694,7 +744,7 @@ mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
        uint32_t explicit;
        uint16_t rx_queue;
 
-       if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
+       if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
                rte_errno = ENODEV;
                DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
                return -rte_errno;
@@ -832,7 +882,7 @@ mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
        int ret;
        uint16_t cur_port = priv->dev_data->port_id;
 
-       if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
+       if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
                rte_errno = ENODEV;
                DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
                return -rte_errno;
@@ -890,7 +940,6 @@ mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
 {
        int ret = 0;
        uint16_t p, pp;
-       struct mlx5_priv *priv = dev->data->dev_private;
 
        /*
         * If the Rx port has no hairpin configuration with the current port,
@@ -899,7 +948,7 @@ mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
         * information updating.
         */
        if (rx_port == RTE_MAX_ETHPORTS) {
-               MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
+               MLX5_ETH_FOREACH_DEV(p, dev->device) {
                        ret = mlx5_hairpin_bind_single_port(dev, p);
                        if (ret != 0)
                                goto unbind;
@@ -909,7 +958,7 @@ mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
                return mlx5_hairpin_bind_single_port(dev, rx_port);
        }
 unbind:
-       MLX5_ETH_FOREACH_DEV(pp, priv->pci_dev)
+       MLX5_ETH_FOREACH_DEV(pp, dev->device)
                if (pp < p)
                        mlx5_hairpin_unbind_single_port(dev, pp);
        return ret;
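The switch from priv->pci_dev to the generic dev->device in these hunks lets
the sibling-port lookup cover non-PCI devices (e.g. SFs on the auxiliary bus)
as well as PCI ones. The analogous public-API idiom is RTE_ETH_FOREACH_DEV_OF(),
which iterates all ethdev ports created on top of one rte_device; a small
sketch (count_sibling_ports is an illustrative helper, not part of the patch):

    #include <rte_dev.h>
    #include <rte_ethdev.h>

    /* Count the ethdev ports instantiated on the same rte_device. */
    static unsigned int
    count_sibling_ports(const struct rte_device *parent)
    {
            unsigned int n = 0;
            uint16_t port_id;

            RTE_ETH_FOREACH_DEV_OF(port_id, parent)
                    n++;
            return n;
    }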
@@ -924,16 +973,15 @@ mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
 {
        int ret = 0;
        uint16_t p;
-       struct mlx5_priv *priv = dev->data->dev_private;
 
        if (rx_port == RTE_MAX_ETHPORTS)
-               MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
+               MLX5_ETH_FOREACH_DEV(p, dev->device) {
                        ret = mlx5_hairpin_unbind_single_port(dev, p);
                        if (ret != 0)
                                return ret;
                }
        else
-               ret = mlx5_hairpin_bind_single_port(dev, rx_port);
+               ret = mlx5_hairpin_unbind_single_port(dev, rx_port);
        return ret;
 }
 
@@ -1065,6 +1113,12 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
+       if ((priv->sh->devx && priv->config.dv_flow_en &&
+           priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
+               ret = priv->obj_ops.lb_dummy_queue_create(dev);
+               if (ret)
+                       goto error;
+       }
        ret = mlx5_txq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
@@ -1119,6 +1173,11 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
+       if (mlx5_dev_ctx_shared_mempool_subscribe(dev) != 0) {
+               DRV_LOG(ERR, "port %u failed to subscribe for mempool life cycle: %s",
+                       dev->data->port_id, rte_strerror(rte_errno));
+               goto error;
+       }
        rte_wmb();
        dev->tx_pkt_burst = mlx5_select_tx_function(dev);
        dev->rx_pkt_burst = mlx5_select_rx_function(dev);
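The mlx5_dev_ctx_shared_mempool_subscribe() call added above plugs the PMD
into mempool life-cycle tracking, which is what lets the driver create MRs
when a pool becomes ready and release them when the pool is destroyed (the
behavior the pre-registration comment in mlx5_rxq_start() refers to). The
underlying facility is rte_mempool_event_callback_register(), a driver-facing
mempool API; a minimal sketch of a subscriber, with mempool_event_cb as an
illustrative handler name rather than the PMD's actual one:

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_mempool.h>

    /* Called for each pool on RTE_MEMPOOL_EVENT_READY/_DESTROY. */
    static void
    mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
                     void *user_data)
    {
            RTE_SET_USED(user_data);
            if (event == RTE_MEMPOOL_EVENT_READY)
                    printf("mempool %s is ready\n", mp->name);
            else if (event == RTE_MEMPOOL_EVENT_DESTROY)
                    printf("mempool %s is being destroyed\n", mp->name);
    }

    static int
    subscribe_mempool_events(void)
    {
            /* Returns 0 on success, negative on failure. */
            return rte_mempool_event_callback_register(mempool_event_cb,
                                                       NULL);
    }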
@@ -1145,6 +1204,8 @@ error:
        mlx5_traffic_disable(dev);
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
+       if (priv->obj_ops.lb_dummy_queue_release)
+               priv->obj_ops.lb_dummy_queue_release(dev);
        mlx5_txpp_stop(dev); /* Stop last. */
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
@@ -1170,18 +1231,21 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
        rte_wmb();
        /* Disable datapath on secondary process. */
        mlx5_mp_os_req_stop_rxtx(dev);
-       usleep(1000 * priv->rxqs_n);
+       rte_delay_us_sleep(1000 * priv->rxqs_n);
        DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
        mlx5_flow_stop_default(dev);
        /* Control flows for default traffic can be removed firstly. */
        mlx5_traffic_disable(dev);
        /* All RX queue flags will be cleared in the flush interface. */
-       mlx5_flow_list_flush(dev, &priv->flows, true);
+       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
+       mlx5_flow_meter_rxq_flush(dev);
        mlx5_rx_intr_vec_disable(dev);
        priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
        priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
+       if (priv->obj_ops.lb_dummy_queue_release)
+               priv->obj_ops.lb_dummy_queue_release(dev);
        mlx5_txpp_stop(dev);
 
        return 0;
@@ -1247,7 +1311,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
                }
                mlx5_txq_release(dev, i);
        }
-       if (priv->config.dv_esw_en && !priv->config.vf) {
+       if (priv->config.dv_esw_en && !priv->config.vf && !priv->config.sf) {
                if (mlx5_flow_create_esw_table_zero_flow(dev))
                        priv->fdb_def_rule = 1;
                else
@@ -1314,8 +1378,12 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
                                goto error;
                        ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
                                             &ipv6_multi_mask);
-                       if (ret)
-                               goto error;
+                       if (ret) {
+                               /* Do not fail on IPv6 multicast creation failure. */
+                               DRV_LOG(WARNING,
+                                       "IPv6 multicast is not supported");
+                               ret = 0;
+                       }
                }
        }
        /* Add MAC address flows. */
@@ -1352,7 +1420,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
        return 0;
 error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
-       mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
 }
@@ -1367,9 +1435,7 @@ error:
 void
 mlx5_traffic_disable(struct rte_eth_dev *dev)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-
-       mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
 }
 
 /**