diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index a9d5d58..d916c8a 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -14,7 +14,6 @@
 #include <mlx5_malloc.h>
 
 #include "mlx5.h"
-#include "mlx5_mr.h"
 #include "mlx5_rx.h"
 #include "mlx5_tx.h"
 #include "mlx5_utils.h"
@@ -105,6 +104,60 @@ error:
        return -rte_errno;
 }
 
+/**
+ * Translate the chunk address to an MR key in order to put it into the cache.
+ */
+static void
+mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
+                            struct rte_mempool_memhdr *memhdr,
+                            unsigned int idx)
+{
+       struct mlx5_rxq_data *rxq = opaque;
+
+       RTE_SET_USED(mp);
+       RTE_SET_USED(idx);
+       mlx5_rx_addr2mr(rxq, (uintptr_t)memhdr->addr);
+}
+
+/**
+ * Register Rx queue mempools and fill the Rx queue cache.
+ * This function tolerates repeated mempool registration.
+ *
+ * @param[in] rxq_ctrl
+ *   Rx queue control data.
+ *
+ * @return
+ *   0 on success, (-1) on failure and rte_errno is set.
+ */
+static int
+mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
+{
+       struct mlx5_priv *priv = rxq_ctrl->priv;
+       struct rte_mempool *mp;
+       uint32_t s;
+       int ret = 0;
+
+       mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
+       /* MPRQ mempool is registered on creation, just fill the cache. */
+       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
+               rte_mempool_mem_iter(rxq_ctrl->rxq.mprq_mp,
+                                    mlx5_rxq_mempool_register_cb,
+                                    &rxq_ctrl->rxq);
+               return 0;
+       }
+       for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
+               mp = rxq_ctrl->rxq.rxseg[s].mp;
+               ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
+                                              priv->sh->cdev->pd, mp,
+                                              &priv->mp_id);
+               if (ret < 0 && rte_errno != EEXIST)
+                       return ret;
+               rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
+                                    &rxq_ctrl->rxq);
+       }
+       return 0;
+}
+
 /**
  * Stop traffic on Rx queues.
  *
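
For reference, a minimal standalone sketch of the rte_mempool_mem_iter() pattern that the mlx5_rxq_mempool_register() helper above relies on: the callback is invoked once per memory chunk backing the pool, which is how the per-queue MR cache gets warmed. The counting callback below is illustrative only, not part of the driver.

    #include <stdio.h>
    #include <rte_mempool.h>

    /* Invoked once per memory chunk backing the mempool. */
    static void
    chunk_cb(struct rte_mempool *mp, void *opaque,
             struct rte_mempool_memhdr *memhdr, unsigned int idx)
    {
            unsigned int *chunks = opaque;

            (*chunks)++;
            printf("%s chunk %u: addr=%p len=%zu\n",
                   mp->name, idx, memhdr->addr, memhdr->len);
    }

    /* Count (and print) all memory chunks of a mempool. */
    static unsigned int
    count_chunks(struct rte_mempool *mp)
    {
            unsigned int chunks = 0;

            rte_mempool_mem_iter(mp, chunk_cb, &chunks);
            return chunks;
    }
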
@@ -152,18 +205,13 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
                if (!rxq_ctrl)
                        continue;
                if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-                       /* Pre-register Rx mempools. */
-                       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
-                               mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
-                                                 rxq_ctrl->rxq.mprq_mp);
-                       } else {
-                               uint32_t s;
-
-                               for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
-                                       mlx5_mr_update_mp
-                                               (dev, &rxq_ctrl->rxq.mr_ctrl,
-                                               rxq_ctrl->rxq.rxseg[s].mp);
-                       }
+                       /*
+                        * Pre-register the mempools. Regardless of whether
+                        * implicit registration is enabled, Rx mempool
+                        * destruction is tracked so that MRs can be freed.
+                        */
+                       if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
+                               goto error;
                        ret = rxq_alloc_elts(rxq_ctrl);
                        if (ret)
                                goto error;
@@ -182,6 +230,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
                ret = priv->obj_ops.rxq_obj_new(dev, i);
                if (ret) {
                        mlx5_free(rxq_ctrl->obj);
+                       rxq_ctrl->obj = NULL;
                        goto error;
                }
                DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
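
The rxq_ctrl->obj = NULL added above matters because the error label later unwinds all queues, and a stale pointer would be freed twice. A minimal sketch of that idiom, with hypothetical names and a simulated failure:

    #include <stdlib.h>

    struct queue { void *obj; };

    static int
    start_queues(struct queue *q, int n, int fail_at)
    {
            int i;

            for (i = 0; i < n; i++) {
                    q[i].obj = malloc(64);
                    if (q[i].obj == NULL)
                            goto error;
                    if (i == fail_at) {      /* simulated setup failure */
                            free(q[i].obj);
                            q[i].obj = NULL; /* keep the unwind loop safe */
                            goto error;
                    }
            }
            return 0;
    error:
            /* free(NULL) is a no-op, so the entry already freed in the
             * failure branch is not freed a second time. */
            for (i = 0; i < n; i++) {
                    free(q[i].obj);
                    q[i].obj = NULL;
            }
            return -1;
    }
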
@@ -228,12 +277,11 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                txq_ctrl = mlx5_txq_get(dev, i);
                if (!txq_ctrl)
                        continue;
-               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+                   txq_ctrl->hairpin_conf.peers[0].port != self_port) {
                        mlx5_txq_release(dev, i);
                        continue;
                }
-               if (txq_ctrl->hairpin_conf.peers[0].port != self_port)
-                       continue;
                if (txq_ctrl->hairpin_conf.manual_bind) {
                        mlx5_txq_release(dev, i);
                        return 0;
@@ -247,13 +295,12 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                txq_ctrl = mlx5_txq_get(dev, i);
                if (!txq_ctrl)
                        continue;
-               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+               /* Skip hairpin queues with other peer ports. */
+               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+                   txq_ctrl->hairpin_conf.peers[0].port != self_port) {
                        mlx5_txq_release(dev, i);
                        continue;
                }
-               /* Skip hairpin queues with other peer ports. */
-               if (txq_ctrl->hairpin_conf.peers[0].port != self_port)
-                       continue;
                if (!txq_ctrl->obj) {
                        rte_errno = ENOMEM;
                        DRV_LOG(ERR, "port %u no txq object found: %d",
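
Both hairpin hunks fold the peer-port check into the same condition as the type check so that mlx5_txq_release() runs on every skip path; the old standalone check continued without releasing, leaking a queue reference. A sketch of the get/check/release discipline, using hypothetical stand-in types:

    struct ref_obj { int refcnt; int type; int peer; };

    static struct ref_obj *
    obj_get(struct ref_obj *o) { o->refcnt++; return o; }

    static void
    obj_release(struct ref_obj *o) { o->refcnt--; }

    static void
    scan(struct ref_obj *tab, int n, int want_type, int self_port)
    {
            int i;

            for (i = 0; i < n; i++) {
                    struct ref_obj *o = obj_get(&tab[i]);

                    /* One combined check, one release: no skip path
                     * leaks a reference. */
                    if (o->type != want_type || o->peer != self_port) {
                            obj_release(o);
                            continue;
                    }
                    /* ... bind the queue ... */
                    obj_release(o);
            }
    }
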
@@ -1066,7 +1113,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
-       if ((priv->config.devx && priv->config.dv_flow_en &&
+       if ((priv->sh->devx && priv->config.dv_flow_en &&
            priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
                ret = priv->obj_ops.lb_dummy_queue_create(dev);
                if (ret)
@@ -1126,12 +1173,17 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
+       if (mlx5_dev_ctx_shared_mempool_subscribe(dev) != 0) {
+               DRV_LOG(ERR, "port %u failed to subscribe to mempool lifecycle events: %s",
+                       dev->data->port_id, rte_strerror(rte_errno));
+               goto error;
+       }
        rte_wmb();
        dev->tx_pkt_burst = mlx5_select_tx_function(dev);
        dev->rx_pkt_burst = mlx5_select_rx_function(dev);
        /* Enable datapath on secondary process. */
        mlx5_mp_os_req_start_rxtx(dev);
-       if (priv->sh->intr_handle.fd >= 0) {
+       if (rte_intr_fd_get(priv->sh->intr_handle) >= 0) {
                priv->sh->port[priv->dev_port - 1].ih_port_id =
                                        (uint32_t)dev->data->port_id;
        } else {
@@ -1140,7 +1192,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                dev->data->dev_conf.intr_conf.lsc = 0;
                dev->data->dev_conf.intr_conf.rmv = 0;
        }
-       if (priv->sh->intr_handle_devx.fd >= 0)
+       if (rte_intr_fd_get(priv->sh->intr_handle_devx) >= 0)
                priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
                                        (uint32_t)dev->data->port_id;
        return 0;
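
The intr_handle changes above follow DPDK 21.11 making struct rte_intr_handle opaque: direct field access such as intr_handle.fd is replaced by accessors like rte_intr_fd_get(). A minimal sketch of a validity check built on that accessor:

    #include <rte_interrupts.h>

    /* rte_intr_fd_get() returns the file descriptor, or a negative
     * value when the handle carries no usable fd. */
    static int
    intr_handle_is_valid(const struct rte_intr_handle *ih)
    {
            return ih != NULL && rte_intr_fd_get(ih) >= 0;
    }
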
@@ -1257,9 +1309,18 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
                                goto error;
                        }
                }
+               if ((priv->representor || priv->master) &&
+                   priv->config.dv_esw_en) {
+                       if (mlx5_flow_create_devx_sq_miss_flow(dev, i) == 0) {
+                               DRV_LOG(ERR,
+                                       "Port %u Tx queue %u: failed to create the devx default miss rule for the representor SQ.",
+                                       dev->data->port_id, i);
+                               goto error;
+                       }
+               }
                mlx5_txq_release(dev, i);
        }
-       if (priv->config.dv_esw_en && !priv->config.vf && !priv->config.sf) {
+       if ((priv->master || priv->representor) && priv->config.dv_esw_en) {
                if (mlx5_flow_create_esw_table_zero_flow(dev))
                        priv->fdb_def_rule = 1;
                else