net/mlx5: add C++ include guard to public header
[dpdk.git] / drivers / net / mlx5 / mlx5_trigger.c
index a124f74..74c9c0a 100644
@@ -105,21 +105,6 @@ error:
        return -rte_errno;
 }
 
-/**
- * Translate the chunk address to MR key in order to put in into the cache.
- */
-static void
-mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
-                            struct rte_mempool_memhdr *memhdr,
-                            unsigned int idx)
-{
-       struct mlx5_rxq_data *rxq = opaque;
-
-       RTE_SET_USED(mp);
-       RTE_SET_USED(idx);
-       mlx5_rx_addr2mr(rxq, (uintptr_t)memhdr->addr);
-}
-
 /**
  * Register Rx queue mempools and fill the Rx queue cache.
  * This function tolerates repeated mempool registration.
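
The helper removed above was an ordinary rte_mempool_mem_iter() callback; its job is now done by mlx5_mr_mempool_populate_cache() below. For reference, a minimal sketch of that callback pattern using only the public mempool API. The per-chunk body here is a placeholder; the driver's version resolved each chunk address to an MR key at this point:

    #include <stdio.h>
    #include <rte_common.h>
    #include <rte_mempool.h>

    /* Matches rte_mempool_mem_cb_t; called once per memory chunk. */
    static void
    chunk_cb(struct rte_mempool *mp, void *opaque,
             struct rte_mempool_memhdr *memhdr, unsigned int idx)
    {
            RTE_SET_USED(mp);
            printf("chunk %u at %p (ctx %p)\n", idx, memhdr->addr, opaque);
    }

    /* Walk every memory chunk backing a mempool, as the removed
     * mlx5_rxq_mempool_register_cb() callers did. */
    static void
    walk_pool_chunks(struct rte_mempool *mp, void *ctx)
    {
            rte_mempool_mem_iter(mp, chunk_cb, ctx);
    }
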
@@ -133,28 +118,29 @@ mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
 static int
 mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
 {
-       struct mlx5_priv *priv = rxq_ctrl->priv;
        struct rte_mempool *mp;
        uint32_t s;
        int ret = 0;
 
        mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
        /* MPRQ mempool is registered on creation, just fill the cache. */
-       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
-               rte_mempool_mem_iter(rxq_ctrl->rxq.mprq_mp,
-                                    mlx5_rxq_mempool_register_cb,
-                                    &rxq_ctrl->rxq);
-               return 0;
-       }
+       if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+               return mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+                                                     rxq_ctrl->rxq.mprq_mp);
        for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
+               bool is_extmem;
+
                mp = rxq_ctrl->rxq.rxseg[s].mp;
-               ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
-                                              priv->sh->cdev->pd, mp,
-                                              &priv->mp_id);
+               is_extmem = (rte_pktmbuf_priv_flags(mp) &
+                            RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
+               ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp,
+                                              is_extmem);
                if (ret < 0 && rte_errno != EEXIST)
                        return ret;
-               rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
-                                    &rxq_ctrl->rxq);
+               ret = mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+                                                    mp);
+               if (ret < 0)
+                       return ret;
        }
        return 0;
 }
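
Two points stand out in the rewritten loop: the open-coded MR cache fill is replaced by mlx5_mr_mempool_populate_cache(), and the new is_extmem argument is derived from the mempool's private flags. That check uses only public mbuf API and can be reproduced standalone; a small sketch (the function name is illustrative, not the driver's):

    #include <stdbool.h>
    #include <rte_mbuf.h>

    /* Pools whose mbufs carry pinned external buffers advertise the
     * RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF private flag; the registration
     * path above passes this as is_extmem so the pool's memory is
     * registered as external. */
    static bool
    pool_has_pinned_ext_buf(struct rte_mempool *mp)
    {
            return (rte_pktmbuf_priv_flags(mp) &
                    RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
    }
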
@@ -175,6 +161,39 @@ mlx5_rxq_stop(struct rte_eth_dev *dev)
                mlx5_rxq_release(dev, i);
 }
 
+static int
+mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
+                     unsigned int idx)
+{
+       int ret = 0;
+
+       if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+               /*
+                * Pre-register the mempools. Regardless of whether
+                * the implicit registration is enabled or not,
+                * Rx mempool destruction is tracked to free MRs.
+                */
+               if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
+                       return -rte_errno;
+               ret = rxq_alloc_elts(rxq_ctrl);
+               if (ret)
+                       return ret;
+       }
+       MLX5_ASSERT(!rxq_ctrl->obj);
+       rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+                                   sizeof(*rxq_ctrl->obj), 0,
+                                   rxq_ctrl->socket);
+       if (!rxq_ctrl->obj) {
+               DRV_LOG(ERR, "Port %u Rx queue %u can't allocate resources.",
+                       dev->data->port_id, idx);
+               rte_errno = ENOMEM;
+               return -rte_errno;
+       }
+       DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.", dev->data->port_id,
+               idx, (void *)&rxq_ctrl->obj);
+       return 0;
+}
+
 /**
  * Start traffic on Rx queues.
  *
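
mlx5_rxq_ctrl_prepare() factors the one-time per-queue setup out of mlx5_rxq_start(). Its allocation amounts to a zeroed, NUMA-local allocation; a rough public-API analogue of the mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ...) call above, with an illustrative function name:

    #include <errno.h>
    #include <stddef.h>
    #include <rte_errno.h>
    #include <rte_malloc.h>

    /* Zeroed allocation on the queue's NUMA socket, mirroring
     * mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, size, 0, socket).
     * Returns NULL and sets rte_errno on failure, as the caller expects. */
    static void *
    alloc_queue_obj(size_t size, int socket)
    {
            void *obj = rte_zmalloc_socket("rxq_obj", size, 0, socket);

            if (obj == NULL)
                    rte_errno = ENOMEM;
            return obj;
    }
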
@@ -207,38 +226,18 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
                if (rxq == NULL)
                        continue;
                rxq_ctrl = rxq->ctrl;
-               if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
-                       /*
-                        * Pre-register the mempools. Regardless of whether
-                        * the implicit registration is enabled or not,
-                        * Rx mempool destruction is tracked to free MRs.
-                        */
-                       if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
-                               goto error;
-                       ret = rxq_alloc_elts(rxq_ctrl);
-                       if (ret)
+               if (!rxq_ctrl->started) {
+                       if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0)
                                goto error;
+                       LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
                }
-               MLX5_ASSERT(!rxq_ctrl->obj);
-               rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
-                                           sizeof(*rxq_ctrl->obj), 0,
-                                           rxq_ctrl->socket);
-               if (!rxq_ctrl->obj) {
-                       DRV_LOG(ERR,
-                               "Port %u Rx queue %u can't allocate resources.",
-                               dev->data->port_id, (*priv->rxqs)[i]->idx);
-                       rte_errno = ENOMEM;
-                       goto error;
-               }
-               ret = priv->obj_ops.rxq_obj_new(dev, i);
+               ret = priv->obj_ops.rxq_obj_new(rxq);
                if (ret) {
                        mlx5_free(rxq_ctrl->obj);
                        rxq_ctrl->obj = NULL;
                        goto error;
                }
-               DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
-                       dev->data->port_id, i, (void *)&rxq_ctrl->obj);
-               LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
+               rxq_ctrl->started = true;
        }
        return 0;
 error:
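
The new rxq_ctrl->started flag is what makes this loop safe to re-run: the expensive preparation happens only on the first start of a (possibly shared) control structure, while object creation still happens on every start. A self-contained sketch of the guard, with illustrative types and stub helpers standing in for mlx5_rxq_ctrl_prepare() and rxq_obj_new():

    #include <stdbool.h>

    struct queue_ctrl {
            bool started;
    };

    /* Stubs for the one-time preparation and the per-start step. */
    static int prepare_once(struct queue_ctrl *qc) { (void)qc; return 0; }
    static int create_obj(struct queue_ctrl *qc) { (void)qc; return 0; }

    static int
    queue_start(struct queue_ctrl *qc)
    {
            /* One-time preparation; skipped when already started. */
            if (!qc->started && prepare_once(qc) < 0)
                    return -1;
            /* Per-start step; on failure 'started' stays unset, so the
             * preparation is retried on the next attempt, as above. */
            if (create_obj(qc) < 0)
                    return -1;
            qc->started = true;
            return 0;
    }
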
@@ -854,7 +853,7 @@ error:
 
 /*
  * Unbind the hairpin port pair, HW configuration of both devices will be cleared
- * and status will be reset for all the queues used between the them.
+ * and status will be reset for all the queues used between them.
  * This function only supports unbinding the Tx from one Rx.
  *
  * @param dev
@@ -1115,6 +1114,24 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
+       if (priv->config.std_delay_drop || priv->config.hp_delay_drop) {
+               if (!priv->config.vf && !priv->config.sf &&
+                   !priv->representor) {
+                       ret = mlx5_get_flag_dropless_rq(dev);
+                       if (ret < 0)
+                               DRV_LOG(WARNING,
+                                       "port %u cannot query dropless flag",
+                                       dev->data->port_id);
+                       else if (!ret)
+                               DRV_LOG(WARNING,
+                                       "port %u dropless_rq OFF, no rearming",
+                                       dev->data->port_id);
+               } else {
+                       DRV_LOG(DEBUG,
+                               "port %u doesn't support dropless_rq flag",
+                               dev->data->port_id);
+               }
+       }
        ret = mlx5_rxq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
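
mlx5_get_flag_dropless_rq() acts as a tri-state probe here: a negative return means the query itself failed, zero means the dropless RQ (delay drop) capability is off, and a positive value means it can be used. A trivial standalone illustration of consuming such a contract; probe_dropless_rq() is a hypothetical stand-in, not the driver helper:

    #include <stdio.h>

    /* Hypothetical stand-in for a tri-state capability probe such as
     * mlx5_get_flag_dropless_rq(): <0 = query failed, 0 = capability
     * off, >0 = capability available. */
    static int
    probe_dropless_rq(void)
    {
            return 1;
    }

    int
    main(void)
    {
            int ret = probe_dropless_rq();

            if (ret < 0)
                    fprintf(stderr, "cannot query dropless flag\n");
            else if (ret == 0)
                    fprintf(stderr, "dropless_rq OFF, no rearming\n");
            else
                    printf("dropless RQ can be armed\n");
            return 0;
    }
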
@@ -1140,6 +1157,17 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                goto error;
        }
        mlx5_os_stats_init(dev);
+       /*
+        * Attach indirection table objects detached on port stop.
+        * They may be needed to create RSS in non-isolated mode.
+        */
+       ret = mlx5_action_handle_attach(dev);
+       if (ret) {
+               DRV_LOG(ERR,
+                       "port %u failed to attach indirect actions: %s",
+                       dev->data->port_id, rte_strerror(rte_errno));
+               goto error;
+       }
        ret = mlx5_traffic_enable(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u failed to set defaults flows",
@@ -1152,14 +1180,6 @@ mlx5_dev_start(struct rte_eth_dev *dev)
        mlx5_rxq_timestamp_set(dev);
        /* Set a mask and offset of scheduling on timestamp into Tx queues. */
        mlx5_txq_dynf_timestamp_set(dev);
-       /* Attach indirection table objects detached on port stop. */
-       ret = mlx5_action_handle_attach(dev);
-       if (ret) {
-               DRV_LOG(ERR,
-                       "port %u failed to attach indirect actions: %s",
-                       dev->data->port_id, rte_strerror(rte_errno));
-               goto error;
-       }
        /*
         * In non-cached mode, it only needs to start the default mreg copy
  * action and no flow created by the application exists anymore.