common/mlx5: share protection domain object
[dpdk.git] / drivers / net / mlx5 / mlx5_trigger.c
index ae7fcca..cf4fbd3 100644 (file)
@@ -105,6 +105,60 @@ error:
        return -rte_errno;
 }
 
+/**
+ * Translate the chunk address to MR key in order to put it into the cache.
+ */
+static void
+mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
+                            struct rte_mempool_memhdr *memhdr,
+                            unsigned int idx)
+{
+       struct mlx5_rxq_data *rxq = opaque;
+
+       RTE_SET_USED(mp);
+       RTE_SET_USED(idx);
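+       /* Look up the lkey for the chunk address to fill the queue MR cache. */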
+       mlx5_rx_addr2mr(rxq, (uintptr_t)memhdr->addr);
+}
+
+/**
+ * Register Rx queue mempools and fill the Rx queue cache.
+ * This function tolerates repeated mempool registration.
+ *
+ * @param[in] rxq_ctrl
+ *   Rx queue control data.
+ *
+ * @return
+ *   0 on success, (-1) on failure and rte_errno is set.
+ */
+static int
+mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+       struct mlx5_priv *priv = rxq_ctrl->priv;
+       struct rte_mempool *mp;
+       uint32_t s;
+       int ret = 0;
+
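+       /* Reset the per-queue MR cache before repopulating it below. */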
+       mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
+       /* The MPRQ mempool is registered on creation; just fill the cache. */
+       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+               rte_mempool_mem_iter(rxq_ctrl->rxq.mprq_mp,
+                                    mlx5_rxq_mempool_register_cb,
+                                    &rxq_ctrl->rxq);
+               return 0;
+       }
+       for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
+               mp = rxq_ctrl->rxq.rxseg[s].mp;
+               ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
+                                              priv->sh->cdev->pd, mp,
+                                              &priv->mp_id);
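+               /* A previously registered mempool (EEXIST) is not an error. */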
+               if (ret < 0 && rte_errno != EEXIST)
+                       return ret;
+               rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
+                                    &rxq_ctrl->rxq);
+       }
+       return 0;
+}
+
 /**
  * Stop traffic on Rx queues.
  *
@@ -152,18 +206,13 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
                if (!rxq_ctrl)
                        continue;
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-                       /* Pre-register Rx mempools. */
-                       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
-                               mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
-                                                 rxq_ctrl->rxq.mprq_mp);
-                       } else {
-                               uint32_t s;
-
-                               for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
-                                       mlx5_mr_update_mp
-                                               (dev, &rxq_ctrl->rxq.mr_ctrl,
-                                               rxq_ctrl->rxq.rxseg[s].mp);
-                       }
+                       /*
+                        * Pre-register the mempools. Whether or not implicit
+                        * registration is enabled, Rx mempool destruction is
+                        * tracked so that the associated MRs can be freed.
+                        */
+                       if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
+                               goto error;
                        ret = rxq_alloc_elts(rxq_ctrl);
                        if (ret)
                                goto error;
@@ -228,12 +277,11 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                txq_ctrl = mlx5_txq_get(dev, i);
                if (!txq_ctrl)
                        continue;
-               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
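+               /* Skip non-hairpin queues and those peered to another port. */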
+               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+                   txq_ctrl->hairpin_conf.peers[0].port != self_port) {
                        mlx5_txq_release(dev, i);
                        continue;
                }
-               if (txq_ctrl->hairpin_conf.peers[0].port != self_port)
-                       continue;
                if (txq_ctrl->hairpin_conf.manual_bind) {
                        mlx5_txq_release(dev, i);
                        return 0;
@@ -247,13 +295,12 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                txq_ctrl = mlx5_txq_get(dev, i);
                if (!txq_ctrl)
                        continue;
-               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+               /* Skip non-hairpin queues and those peered to another port. */
+               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+                   txq_ctrl->hairpin_conf.peers[0].port != self_port) {
                        mlx5_txq_release(dev, i);
                        continue;
                }
-               /* Skip hairpin queues with other peer ports. */
-               if (txq_ctrl->hairpin_conf.peers[0].port != self_port)
-                       continue;
                if (!txq_ctrl->obj) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u no txq object found: %d",
@@ -697,7 +744,7 @@ mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
        uint32_t explicit;
        uint16_t rx_queue;
 
-       if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
+       if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
                rte_errno = ENODEV;
                DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
                return -rte_errno;
@@ -835,7 +882,7 @@ mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
        int ret;
        uint16_t cur_port = priv->dev_data->port_id;
 
-       if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
+       if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
                rte_errno = ENODEV;
                DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
                return -rte_errno;
@@ -893,7 +940,6 @@ mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
 {
        int ret = 0;
        uint16_t p, pp;
-       struct mlx5_priv *priv = dev->data->dev_private;
 
        /*
         * If the Rx port has no hairpin configuration with the current port,
@@ -902,7 +948,7 @@ mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
         * information updating.
         */
        if (rx_port == RTE_MAX_ETHPORTS) {
-               MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
+               MLX5_ETH_FOREACH_DEV(p, dev->device) {
                        ret = mlx5_hairpin_bind_single_port(dev, p);
                        if (ret != 0)
                                goto unbind;
@@ -912,7 +958,7 @@ mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
                return mlx5_hairpin_bind_single_port(dev, rx_port);
        }
 unbind:
-       MLX5_ETH_FOREACH_DEV(pp, priv->pci_dev)
+       MLX5_ETH_FOREACH_DEV(pp, dev->device)
                if (pp < p)
                        mlx5_hairpin_unbind_single_port(dev, pp);
        return ret;
@@ -927,10 +973,9 @@ mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
 {
        int ret = 0;
        uint16_t p;
-       struct mlx5_priv *priv = dev->data->dev_private;
 
        if (rx_port == RTE_MAX_ETHPORTS)
-               MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
+               MLX5_ETH_FOREACH_DEV(p, dev->device) {
                        ret = mlx5_hairpin_unbind_single_port(dev, p);
                        if (ret != 0)
                                return ret;
@@ -1068,7 +1113,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
-       if ((priv->config.devx && priv->config.dv_flow_en &&
+       if ((priv->sh->devx && priv->config.dv_flow_en &&
            priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
                ret = priv->obj_ops.lb_dummy_queue_create(dev);
                if (ret)
@@ -1128,6 +1173,11 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
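+       /* Subscribe to mempool life cycle events to keep MRs in sync. */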
+       if (mlx5_dev_ctx_shared_mempool_subscribe(dev) != 0) {
+               DRV_LOG(ERR, "port %u failed to subscribe for mempool life cycle: %s",
+                       dev->data->port_id, rte_strerror(rte_errno));
+               goto error;
+       }
        rte_wmb();
        dev->tx_pkt_burst = mlx5_select_tx_function(dev);
        dev->rx_pkt_burst = mlx5_select_rx_function(dev);
@@ -1187,7 +1237,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
        /* Control flows for default traffic can be removed firstly. */
        mlx5_traffic_disable(dev);
        /* All RX queue flags will be cleared in the flush interface. */
-       mlx5_flow_list_flush(dev, &priv->flows, true);
+       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
        mlx5_flow_meter_rxq_flush(dev);
        mlx5_rx_intr_vec_disable(dev);
        priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
@@ -1261,7 +1311,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
                }
                mlx5_txq_release(dev, i);
        }
-       if (priv->config.dv_esw_en && !priv->config.vf) {
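+       /* Only the PF creates the default FDB rule, not VF or SF devices. */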
+       if (priv->config.dv_esw_en && !priv->config.vf && !priv->config.sf) {
                if (mlx5_flow_create_esw_table_zero_flow(dev))
                        priv->fdb_def_rule = 1;
                else
@@ -1370,7 +1420,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
        return 0;
 error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
-       mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
 }
@@ -1385,9 +1435,7 @@ error:
 void
 mlx5_traffic_disable(struct rte_eth_dev *dev)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-
-       mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
 }
 
 /**