net/mlx5: set dynamic flow metadata in Rx queues
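
The mask and offset of the rte_flow dynamic metadata mbuf field are now
copied into every Rx queue at device start via
mlx5_flow_rxq_dynf_metadata_set(). Start also looks up the
fine-granularity inline dynamic flag and caches its bit mask in
rte_net_mlx5_dynf_inline_mask for the Tx datapath.

In addition:
- flow start/stop is split: mlx5_flow_start_default() and
  mlx5_flow_stop_default() handle only the default mreg copy action,
  while application flows are flushed explicitly on device stop;
- RSS RETA configuration is skipped when no Rx queue is configured;
- failure to create the FDB default jump rule is no longer fatal: an
  informational message is logged and only E-Switch group 0 flows
  remain usable.
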
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index cafab25..feb9154 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
 #include <rte_alarm.h>
 
 #include "mlx5.h"
+#include "mlx5_mr.h"
 #include "mlx5_rxtx.h"
 #include "mlx5_utils.h"
+#include "rte_pmd_mlx5.h"
 
 /**
  * Stop traffic on Tx queues.
@@ -106,9 +108,12 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
        unsigned int i;
        int ret = 0;
        enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;
+       struct mlx5_rxq_data *rxq = NULL;
 
        for (i = 0; i < priv->rxqs_n; ++i) {
-               if ((*priv->rxqs)[i]->lro) {
+               rxq = (*priv->rxqs)[i];
+
+               if (rxq && rxq->lro) {
                        obj_type =  MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
                        break;
                }
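
For context, rxq->lro is set when the application enables the LRO offload,
and a single LRO queue switches the whole port to DevX RQ objects. A minimal
sketch of a configuration that takes this path (the value is illustrative):

    #include <rte_ethdev.h>

    /* Any Rx queue inheriting DEV_RX_OFFLOAD_TCP_LRO from this port
     * configuration makes the loop above pick MLX5_RXQ_OBJ_TYPE_DEVX_RQ. */
    static const struct rte_eth_conf port_conf = {
            .rxmode = {
                    .offloads = DEV_RX_OFFLOAD_TCP_LRO,
                    .max_lro_pkt_size = 8192, /* illustrative */
            },
    };
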
@@ -265,15 +270,23 @@ error:
 int
 mlx5_dev_start(struct rte_eth_dev *dev)
 {
-       struct mlx5_priv *priv = dev->data->dev_private;
        int ret;
+       int fine_inline;
 
        DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
-       ret = mlx5_dev_configure_rss_reta(dev);
-       if (ret) {
-               DRV_LOG(ERR, "port %u reta config failed: %s",
-                       dev->data->port_id, strerror(rte_errno));
-               return -rte_errno;
+       fine_inline = rte_mbuf_dynflag_lookup
+               (RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
+       if (fine_inline >= 0)
+               rte_net_mlx5_dynf_inline_mask = 1ULL << fine_inline;
+       else
+               rte_net_mlx5_dynf_inline_mask = 0;
+       if (dev->data->nb_rx_queues > 0) {
+               ret = mlx5_dev_configure_rss_reta(dev);
+               if (ret) {
+                       DRV_LOG(ERR, "port %u reta config failed: %s",
+                               dev->data->port_id, strerror(rte_errno));
+                       return -rte_errno;
+               }
        }
        ret = mlx5_txq_start(dev);
        if (ret) {
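
The lookup above succeeds only if the application registered the flag before
rte_eth_dev_start(). A minimal application-side sketch (the helper names are
illustrative, not part of this patch):

    #include <rte_errno.h>
    #include <rte_mbuf.h>
    #include <rte_mbuf_dyn.h>
    #include <rte_pmd_mlx5.h>

    static uint64_t app_no_inline_mask; /* cached dynamic flag bit */

    /* Register the fine-granularity inline flag; must run before
     * rte_eth_dev_start() so the PMD lookup finds it. */
    static int
    app_register_no_inline_flag(void)
    {
            static const struct rte_mbuf_dynflag desc = {
                    .name = RTE_PMD_MLX5_FINE_GRANULARITY_INLINE,
            };
            int bit = rte_mbuf_dynflag_register(&desc);

            if (bit < 0)
                    return -rte_errno;
            app_no_inline_mask = 1ULL << bit;
            return 0;
    }

    /* Hint the PMD not to inline the data of this packet on Tx. */
    static inline void
    app_mark_no_inline(struct rte_mbuf *m)
    {
            m->ol_flags |= app_no_inline_mask;
    }
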
@@ -295,6 +308,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                mlx5_txq_stop(dev);
                return -rte_errno;
        }
+       /* Set the started flag here for subsequent steps, like control flows. */
        dev->data->dev_started = 1;
        ret = mlx5_rx_intr_vec_enable(dev);
        if (ret) {
@@ -305,14 +319,21 @@ mlx5_dev_start(struct rte_eth_dev *dev)
        mlx5_stats_init(dev);
        ret = mlx5_traffic_enable(dev);
        if (ret) {
-               DRV_LOG(DEBUG, "port %u failed to set defaults flows",
+               DRV_LOG(ERR, "port %u failed to set default flows",
                        dev->data->port_id);
                goto error;
        }
-       ret = mlx5_flow_start(dev, &priv->flows);
+       /* Set the mask and offset of dynamic flow metadata in Rx queues. */
+       mlx5_flow_rxq_dynf_metadata_set(dev);
+       /*
+        * In non-cached mode, only the default mreg copy action needs to be
+        * started, since no application-created flow exists at this point.
+        * Wrapping this in an interface is still worthwhile for future use.
+        */
+       ret = mlx5_flow_start_default(dev);
        if (ret) {
-               DRV_LOG(DEBUG, "port %u failed to set flows",
-                       dev->data->port_id);
+               DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
+                       dev->data->port_id, strerror(rte_errno));
                goto error;
        }
        rte_wmb();
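
mlx5_flow_rxq_dynf_metadata_set() can only install a valid offset and mask if
the rte_flow metadata dynamic field exists, which again is the application's
job before port start. A sketch (helper names are illustrative):

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_flow.h>
    #include <rte_mbuf.h>

    /* Register the flow metadata dynamic field and flag before
     * rte_eth_dev_start() so every Rx queue gets the offset/mask. */
    static int
    app_enable_rx_metadata(void)
    {
            return rte_flow_dynf_metadata_register();
    }

    /* Read metadata delivered by a flow rule with a SET_META action. */
    static void
    app_print_rx_metadata(struct rte_mbuf *m)
    {
            if (m->ol_flags & PKT_RX_DYNF_METADATA)
                    printf("Rx metadata: 0x%" PRIx32 "\n",
                           *RTE_FLOW_DYNF_METADATA(m));
    }
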
@@ -326,7 +347,7 @@ error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        /* Rollback. */
        dev->data->dev_started = 0;
-       mlx5_flow_stop(dev, &priv->flows);
+       mlx5_flow_stop_default(dev);
        mlx5_traffic_disable(dev);
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
@@ -356,8 +377,11 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
        mlx5_mp_req_stop_rxtx(dev);
        usleep(1000 * priv->rxqs_n);
        DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
-       mlx5_flow_stop(dev, &priv->flows);
+       mlx5_flow_stop_default(dev);
+       /* Control flows for default traffic are removed first. */
        mlx5_traffic_disable(dev);
+       /* All Rx queue flags will be cleared in the flush interface. */
+       mlx5_flow_list_flush(dev, &priv->flows, true);
        mlx5_rx_intr_vec_disable(dev);
        mlx5_dev_interrupt_handler_uninstall(dev);
        mlx5_txq_stop(dev);
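
mlx5_flow_list_flush() gains a third argument in this series. A prototype
inferred from the two call sites in this patch; the parameter name, list
type, and exact semantics are assumptions, not shown in these hunks:

    /* 'active' presumably tells the flush whether per-Rx-queue flow
     * flags still need to be cleared: true for application flows on
     * device stop, false for control flows. */
    void mlx5_flow_list_flush(struct rte_eth_dev *dev,
                              struct mlx5_flows *list, bool active);
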
@@ -420,9 +444,14 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
                }
                mlx5_txq_release(dev, i);
        }
-       if (priv->config.dv_esw_en && !priv->config.vf)
-               if (!mlx5_flow_create_esw_table_zero_flow(dev))
-                       goto error;
+       if (priv->config.dv_esw_en && !priv->config.vf) {
+               if (mlx5_flow_create_esw_table_zero_flow(dev))
+                       priv->fdb_def_rule = 1;
+               else
+                       DRV_LOG(INFO, "port %u FDB default rule cannot be"
+                               " configured - only E-Switch group 0 flows are"
+                               " supported.", dev->data->port_id);
+       }
        if (priv->isolated)
                return 0;
        if (dev->data->promiscuous) {
@@ -511,7 +540,7 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
        return 0;
 error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
-       mlx5_flow_list_flush(dev, &priv->ctrl_flows);
+       mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
 }
@@ -528,7 +557,7 @@ mlx5_traffic_disable(struct rte_eth_dev *dev)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
 
-       mlx5_flow_list_flush(dev, &priv->ctrl_flows);
+       mlx5_flow_list_flush(dev, &priv->ctrl_flows, false);
 }
 
 /**