net/softnic: fix memory leak as profile is freed
[dpdk.git] / drivers / net / mlx5 / mlx5_trigger.c
index cba736b..54173bf 100644 (file)
@@ -6,15 +6,17 @@
 #include <unistd.h>
 
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_interrupts.h>
 #include <rte_alarm.h>
+#include <rte_cycles.h>
 
 #include <mlx5_malloc.h>
 
 #include "mlx5.h"
 #include "mlx5_mr.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
 #include "mlx5_utils.h"
 #include "rte_pmd_mlx5.h"
 
@@ -77,6 +79,7 @@ mlx5_txq_start(struct rte_eth_dev *dev)
                }
                if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
                        size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
+
                        txq_data->fcqs = mlx5_malloc(flags, size,
                                                     RTE_CACHE_LINE_SIZE,
                                                     txq_ctrl->socket);
@@ -218,12 +221,34 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
        struct mlx5_devx_obj *rq;
        unsigned int i;
        int ret = 0;
+       bool need_auto = false;
+       uint16_t self_port = dev->data->port_id;
 
        for (i = 0; i != priv->txqs_n; ++i) {
                txq_ctrl = mlx5_txq_get(dev, i);
                if (!txq_ctrl)
                        continue;
-               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+                   txq_ctrl->hairpin_conf.peers[0].port != self_port) {
+                       mlx5_txq_release(dev, i);
+                       continue;
+               }
+               if (txq_ctrl->hairpin_conf.manual_bind) {
+                       mlx5_txq_release(dev, i);
+                       return 0;
+               }
+               need_auto = true;
+               mlx5_txq_release(dev, i);
+       }
+       if (!need_auto)
+               return 0;
+       for (i = 0; i != priv->txqs_n; ++i) {
+               txq_ctrl = mlx5_txq_get(dev, i);
+               if (!txq_ctrl)
+                       continue;
+               /* Skip hairpin queues with other peer ports. */
+               if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+                   txq_ctrl->hairpin_conf.peers[0].port != self_port) {
                        mlx5_txq_release(dev, i);
                        continue;
                }
@@ -275,6 +300,9 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
                ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
                if (ret)
                        goto error;
+               /* Queues bound automatically will be destroyed directly. */
+               rxq_ctrl->hairpin_status = 1;
+               txq_ctrl->hairpin_status = 1;
                mlx5_txq_release(dev, i);
                mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
        }
@@ -667,7 +695,7 @@ mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
        uint32_t explicit;
        uint16_t rx_queue;
 
-       if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
+       if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
                rte_errno = ENODEV;
                DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
                return -rte_errno;
@@ -805,7 +833,7 @@ mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
        int ret;
        uint16_t cur_port = priv->dev_data->port_id;
 
-       if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
+       if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
                rte_errno = ENODEV;
                DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
                return -rte_errno;
@@ -863,7 +891,6 @@ mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
 {
        int ret = 0;
        uint16_t p, pp;
-       struct mlx5_priv *priv = dev->data->dev_private;
 
        /*
         * If the Rx port has no hairpin configuration with the current port,
@@ -872,7 +899,7 @@ mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
         * information updating.
         */
        if (rx_port == RTE_MAX_ETHPORTS) {
-               MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
+               MLX5_ETH_FOREACH_DEV(p, dev->device) {
                        ret = mlx5_hairpin_bind_single_port(dev, p);
                        if (ret != 0)
                                goto unbind;
@@ -882,7 +909,7 @@ mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
                return mlx5_hairpin_bind_single_port(dev, rx_port);
        }
 unbind:
-       MLX5_ETH_FOREACH_DEV(pp, priv->pci_dev)
+       MLX5_ETH_FOREACH_DEV(pp, dev->device)
                if (pp < p)
                        mlx5_hairpin_unbind_single_port(dev, pp);
        return ret;
@@ -897,16 +924,15 @@ mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
 {
        int ret = 0;
        uint16_t p;
-       struct mlx5_priv *priv = dev->data->dev_private;
 
        if (rx_port == RTE_MAX_ETHPORTS)
-               MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
+               MLX5_ETH_FOREACH_DEV(p, dev->device) {
                        ret = mlx5_hairpin_unbind_single_port(dev, p);
                        if (ret != 0)
                                return ret;
                }
        else
-               ret = mlx5_hairpin_bind_single_port(dev, rx_port);
+               ret = mlx5_hairpin_unbind_single_port(dev, rx_port);
        return ret;
 }
 
@@ -1038,6 +1064,12 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
+       if ((priv->config.devx && priv->config.dv_flow_en &&
+           priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
+               ret = priv->obj_ops.lb_dummy_queue_create(dev);
+               if (ret)
+                       goto error;
+       }
        ret = mlx5_txq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
@@ -1050,9 +1082,13 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
+       /*
+        * This step will be skipped if there is no hairpin Tx queue configured
+        * with an Rx peer queue from the same device.
+        */
        ret = mlx5_hairpin_auto_bind(dev);
        if (ret) {
-               DRV_LOG(ERR, "port %u hairpin binding failed: %s",
+               DRV_LOG(ERR, "port %u hairpin auto binding failed: %s",
                        dev->data->port_id, strerror(rte_errno));
                goto error;
        }
@@ -1114,6 +1150,8 @@ error:
        mlx5_traffic_disable(dev);
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
+       if (priv->obj_ops.lb_dummy_queue_release)
+               priv->obj_ops.lb_dummy_queue_release(dev);
        mlx5_txpp_stop(dev); /* Stop last. */
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
@@ -1139,18 +1177,21 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
        rte_wmb();
        /* Disable datapath on secondary process. */
        mlx5_mp_os_req_stop_rxtx(dev);
-       usleep(1000 * priv->rxqs_n);
+       rte_delay_us_sleep(1000 * priv->rxqs_n);
        DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
        mlx5_flow_stop_default(dev);
        /* Control flows for default traffic can be removed firstly. */
        mlx5_traffic_disable(dev);
        /* All RX queue flags will be cleared in the flush interface. */
-       mlx5_flow_list_flush(dev, &priv->flows, true);
+       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
+       mlx5_flow_meter_rxq_flush(dev);
        mlx5_rx_intr_vec_disable(dev);
        priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
        priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
+       if (priv->obj_ops.lb_dummy_queue_release)
+               priv->obj_ops.lb_dummy_queue_release(dev);
        mlx5_txpp_stop(dev);
 
        return 0;
@@ -1203,7 +1244,11 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
                struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
                if (!txq_ctrl)
                        continue;
-               if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
+               /* Only Tx implicit mode requires the default Tx flow. */
+               if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN &&
+                   txq_ctrl->hairpin_conf.tx_explicit == 0 &&
+                   txq_ctrl->hairpin_conf.peers[0].port ==
+                   priv->dev_data->port_id) {
                        ret = mlx5_ctrl_flow_source_queue(dev, i);
                        if (ret) {
                                mlx5_txq_release(dev, i);
@@ -1212,7 +1257,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
                }
                mlx5_txq_release(dev, i);
        }
-       if (priv->config.dv_esw_en && !priv->config.vf) {
+       if (priv->config.dv_esw_en && !priv->config.vf && !priv->config.sf) {
                if (mlx5_flow_create_esw_table_zero_flow(dev))
                        priv->fdb_def_rule = 1;
                else
@@ -1279,8 +1324,12 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
                                goto error;
                        ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
                                             &ipv6_multi_mask);
-                       if (ret)
-                               goto error;
+                       if (ret) {
+                               /* Do not fail on IPv6 broadcast creation failure. */
+                               DRV_LOG(WARNING,
+                                       "IPv6 broadcast is not supported");
+                               ret = 0;
+                       }
                }
        }
        /* Add MAC address flows. */
@@ -1317,7 +1366,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
        return 0;
 error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
-       mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
 }
@@ -1332,9 +1381,7 @@ error:
 void
 mlx5_traffic_disable(struct rte_eth_dev *dev)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
-
-       mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
+       mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
 }
 
 /**