net/mlx5: separate Rx queue object creations
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index feb9154..6376719 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -59,7 +59,9 @@ mlx5_txq_start(struct rte_eth_dev *dev)
                } else {
                        txq_alloc_elts(txq_ctrl);
                        txq_ctrl->obj = mlx5_txq_obj_new
-                               (dev, i, MLX5_TXQ_OBJ_TYPE_IBV);
+                               (dev, i, priv->txpp_en ?
+                               MLX5_TXQ_OBJ_TYPE_DEVX_SQ :
+                               MLX5_TXQ_OBJ_TYPE_IBV);
                }
                if (!txq_ctrl->obj) {
                        rte_errno = ENOMEM;
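
Note: the hunk above makes mlx5_txq_start() pick the Tx queue object type at creation time: when Tx packet pacing (priv->txpp_en) is enabled the queue must be backed by a DevX SQ, otherwise the Verbs object is kept. A minimal, self-contained sketch of that selection is below; the helper name and enum are illustrative only and not part of the driver.

/* Illustrative only: mirrors the selection done in mlx5_txq_start() above.
 * Packet pacing relies on DevX send queues, so the object type follows
 * the packet-pacing configuration flag. */
enum txq_obj_type { TXQ_OBJ_TYPE_IBV, TXQ_OBJ_TYPE_DEVX_SQ };

static enum txq_obj_type
txq_obj_type_select(int txpp_enabled)
{
	return txpp_enabled ? TXQ_OBJ_TYPE_DEVX_SQ : TXQ_OBJ_TYPE_IBV;
}
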
@@ -107,17 +109,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;
        int ret = 0;
-       enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;
-       struct mlx5_rxq_data *rxq = NULL;
 
-       for (i = 0; i < priv->rxqs_n; ++i) {
-               rxq = (*priv->rxqs)[i];
-
-               if (rxq && rxq->lro) {
-                       obj_type =  MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
-                       break;
-               }
-       }
        /* Allocate/reuse/resize mempool for Multi-Packet RQ. */
        if (mlx5_mprq_alloc_mp(dev)) {
                /* Should not release Rx queues but return immediately. */
@@ -129,32 +121,21 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 
                if (!rxq_ctrl)
                        continue;
-               if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
-                       rxq_ctrl->obj = mlx5_rxq_obj_new
-                               (dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
-                       if (!rxq_ctrl->obj)
+               if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+                       /* Pre-register Rx mempool. */
+                       mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+                            rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
+                       DRV_LOG(DEBUG, "port %u Rx queue %u registering mp %s"
+                               " having %u chunks", dev->data->port_id,
+                               rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
+                       mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+                       ret = rxq_alloc_elts(rxq_ctrl);
+                       if (ret)
                                goto error;
-                       continue;
                }
-               /* Pre-register Rx mempool. */
-               mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-                    rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
-               DRV_LOG(DEBUG,
-                       "port %u Rx queue %u registering"
-                       " mp %s having %u chunks",
-                       dev->data->port_id, rxq_ctrl->rxq.idx,
-                       mp->name, mp->nb_mem_chunks);
-               mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
-               ret = rxq_alloc_elts(rxq_ctrl);
-               if (ret)
-                       goto error;
-               rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
+               rxq_ctrl->obj = priv->obj_ops->rxq_obj_new(dev, i);
                if (!rxq_ctrl->obj)
                        goto error;
-               if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
-                       rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
-               else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
-                       rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
        }
        return 0;
 error:
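
Note: with this hunk mlx5_rxq_start() no longer scans the queues for LRO to choose between Verbs and DevX objects per port; it simply calls priv->obj_ops->rxq_obj_new(dev, i). The sketch below shows the general ops-table pattern implied by that call; the struct and backend names are hypothetical and only illustrate selecting the Rx queue object constructor once per device instead of per queue.

#include <stdint.h>
#include <rte_ethdev.h>

/* Hypothetical sketch of the callback table behind
 * priv->obj_ops->rxq_obj_new(dev, i): each backend (Verbs or DevX)
 * provides its own Rx queue object constructor and the device selects
 * one table at configuration time. Names are illustrative. */
struct rxq_obj;			/* opaque per-queue HW object */

struct dev_obj_ops {
	struct rxq_obj *(*rxq_obj_new)(struct rte_eth_dev *dev, uint16_t idx);
};

static struct rxq_obj *
verbs_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	/* ... create the Rx queue object through the Verbs API ... */
	(void)dev; (void)idx;
	return NULL;
}

static struct rxq_obj *
devx_rxq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	/* ... create the Rx queue object through DevX (needed e.g. for LRO) ... */
	(void)dev; (void)idx;
	return NULL;
}

static const struct dev_obj_ops verbs_obj_ops = { .rxq_obj_new = verbs_rxq_obj_new };
static const struct dev_obj_ops devx_obj_ops  = { .rxq_obj_new = devx_rxq_obj_new };
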
@@ -270,6 +251,7 @@ error:
 int
 mlx5_dev_start(struct rte_eth_dev *dev)
 {
+       struct mlx5_priv *priv = dev->data->dev_private;
        int ret;
        int fine_inline;
 
@@ -288,25 +270,29 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        return -rte_errno;
                }
        }
+       ret = mlx5_txpp_start(dev);
+       if (ret) {
+               DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s",
+                       dev->data->port_id, strerror(rte_errno));
+               goto error;
+       }
        ret = mlx5_txq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
                        dev->data->port_id, strerror(rte_errno));
-               return -rte_errno;
+               goto error;
        }
        ret = mlx5_rxq_start(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
                        dev->data->port_id, strerror(rte_errno));
-               mlx5_txq_stop(dev);
-               return -rte_errno;
+               goto error;
        }
        ret = mlx5_hairpin_bind(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u hairpin binding failed: %s",
                        dev->data->port_id, strerror(rte_errno));
-               mlx5_txq_stop(dev);
-               return -rte_errno;
+               goto error;
        }
        /* Set started flag here for the following steps like control flow. */
        dev->data->dev_started = 1;
@@ -316,15 +302,19 @@ mlx5_dev_start(struct rte_eth_dev *dev)
                        dev->data->port_id);
                goto error;
        }
-       mlx5_stats_init(dev);
+       mlx5_os_stats_init(dev);
        ret = mlx5_traffic_enable(dev);
        if (ret) {
                DRV_LOG(ERR, "port %u failed to set defaults flows",
                        dev->data->port_id);
                goto error;
        }
-       /* Set a mask and offset of dynamic metadata flows into Rx queues*/
+       /* Set a mask and offset of dynamic metadata flows into Rx queues. */
        mlx5_flow_rxq_dynf_metadata_set(dev);
+       /* Set flags and context to convert Rx timestamps. */
+       mlx5_rxq_timestamp_set(dev);
+       /* Set a mask and offset of scheduling on timestamp into Tx queues. */
+       mlx5_txq_dynf_timestamp_set(dev);
        /*
         * In non-cached mode, it only needs to start the default mreg copy
         * action and no flow created by application exists anymore.
@@ -340,8 +330,19 @@ mlx5_dev_start(struct rte_eth_dev *dev)
        dev->tx_pkt_burst = mlx5_select_tx_function(dev);
        dev->rx_pkt_burst = mlx5_select_rx_function(dev);
        /* Enable datapath on secondary process. */
-       mlx5_mp_req_start_rxtx(dev);
-       mlx5_dev_interrupt_handler_install(dev);
+       mlx5_mp_os_req_start_rxtx(dev);
+       if (priv->sh->intr_handle.fd >= 0) {
+               priv->sh->port[priv->dev_port - 1].ih_port_id =
+                                       (uint32_t)dev->data->port_id;
+       } else {
+               DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
+                       dev->data->port_id);
+               dev->data->dev_conf.intr_conf.lsc = 0;
+               dev->data->dev_conf.intr_conf.rmv = 0;
+       }
+       if (priv->sh->intr_handle_devx.fd >= 0)
+               priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
+                                       (uint32_t)dev->data->port_id;
        return 0;
 error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
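
Note: instead of installing a per-device interrupt handler, the start path above publishes the port id into the shared-context tables (ih_port_id for the link-status/removal events handler, devx_ih_port_id for the DevX events handler) only when the corresponding fd is valid, and clears the LSC/RMV flags otherwise; mlx5_dev_stop() later resets both entries to RTE_MAX_ETHPORTS. A self-contained sketch of that publish/withdraw pattern, with hypothetical names:

#include <stdint.h>
#include <rte_ethdev.h>	/* RTE_MAX_ETHPORTS */

/* Hypothetical per-shared-context bookkeeping: one slot per member port.
 * RTE_MAX_ETHPORTS marks a slot with no registered port. */
struct shared_port_slot {
	uint32_t ih_port_id;	  /* owner of line-status/removal events */
	uint32_t devx_ih_port_id; /* owner of DevX async events */
};

static void
port_intr_publish(struct shared_port_slot *slot, int ih_fd, int devx_fd,
		  uint16_t port_id)
{
	if (ih_fd >= 0)
		slot->ih_port_id = port_id;
	if (devx_fd >= 0)
		slot->devx_ih_port_id = port_id;
}

static void
port_intr_withdraw(struct shared_port_slot *slot)
{
	slot->ih_port_id = RTE_MAX_ETHPORTS;
	slot->devx_ih_port_id = RTE_MAX_ETHPORTS;
}
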
@@ -351,6 +352,7 @@ error:
        mlx5_traffic_disable(dev);
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
+       mlx5_txpp_stop(dev); /* Stop last. */
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
 }
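
Note: all failure points in mlx5_dev_start() now converge on a single error label that releases resources in reverse order of bring-up, with mlx5_txpp_stop() deliberately last because packet pacing resources may still be referenced while queues are torn down. A generic sketch of that staged bring-up and reverse teardown pattern; the stage functions are hypothetical stand-ins for the driver's mlx5_txpp_start()/mlx5_txq_start()/mlx5_rxq_start() and their *_stop() counterparts.

#include <rte_errno.h>

/* Hypothetical stage functions, trivially stubbed to keep the sketch
 * self-contained. */
static int txpp_start(void) { return 0; }
static int txq_start(void)  { return 0; }
static int rxq_start(void)  { return 0; }
static void rxq_stop(void)  { }
static void txq_stop(void)  { }
static void txpp_stop(void) { }

static int
dev_start_staged(void)
{
	int ret;

	ret = txpp_start();	/* packet pacing is brought up first ... */
	if (ret)
		goto error;
	ret = txq_start();
	if (ret)
		goto error;
	ret = rxq_start();
	if (ret)
		goto error;
	return 0;
error:
	ret = rte_errno;	/* save rte_errno before cleanup */
	rxq_stop();
	txq_stop();
	txpp_stop();		/* ... and is stopped last */
	rte_errno = ret;	/* restore rte_errno */
	return -rte_errno;
}
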
@@ -374,7 +376,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
        dev->tx_pkt_burst = removed_tx_burst;
        rte_wmb();
        /* Disable datapath on secondary process. */
-       mlx5_mp_req_stop_rxtx(dev);
+       mlx5_mp_os_req_stop_rxtx(dev);
        usleep(1000 * priv->rxqs_n);
        DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
        mlx5_flow_stop_default(dev);
@@ -383,9 +385,11 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
        /* All RX queue flags will be cleared in the flush interface. */
        mlx5_flow_list_flush(dev, &priv->flows, true);
        mlx5_rx_intr_vec_disable(dev);
-       mlx5_dev_interrupt_handler_uninstall(dev);
+       priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
+       priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
        mlx5_txq_stop(dev);
        mlx5_rxq_stop(dev);
+       mlx5_txpp_stop(dev);
 }
 
 /**
@@ -452,6 +456,15 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
                                " configured - only Eswitch group 0 flows are"
                                " supported.", dev->data->port_id);
        }
+       if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
+               ret = mlx5_flow_lacp_miss(dev);
+               if (ret)
+                       DRV_LOG(INFO, "port %u LACP rule cannot be created - "
+                               "forward LACP to kernel.", dev->data->port_id);
+               else
+                       DRV_LOG(INFO, "LACP traffic will be missed in port %u."
+                               , dev->data->port_id);
+       }
        if (priv->isolated)
                return 0;
        if (dev->data->promiscuous) {