X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_trigger.c;h=6376719fef52eea8ef51c12391f4276576b9dca9;hb=6deb19e1b2d2;hp=ca25ad9b865e6ae255f8e8fc47fb94768fbe1002;hpb=d133f4cdb706601604d0a5eef814ef2546579aa3;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index ca25ad9b86..6376719fef 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -59,7 +59,9 @@ mlx5_txq_start(struct rte_eth_dev *dev)
 		} else {
 			txq_alloc_elts(txq_ctrl);
 			txq_ctrl->obj = mlx5_txq_obj_new
-				(dev, i, MLX5_TXQ_OBJ_TYPE_IBV);
+				(dev, i, priv->txpp_en ?
+				 MLX5_TXQ_OBJ_TYPE_DEVX_SQ :
+				 MLX5_TXQ_OBJ_TYPE_IBV);
 		}
 		if (!txq_ctrl->obj) {
 			rte_errno = ENOMEM;
@@ -107,16 +109,7 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 	struct mlx5_priv *priv = dev->data->dev_private;
 	unsigned int i;
 	int ret = 0;
-	enum mlx5_rxq_obj_type obj_type = MLX5_RXQ_OBJ_TYPE_IBV;
-	struct mlx5_rxq_data *rxq = NULL;
-
-	for (i = 0; i < priv->rxqs_n; ++i) {
-		rxq = (*priv->rxqs)[i];
-		if (rxq && rxq->lro) {
-			obj_type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
-			break;
-		}
-	}
+
 	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
 	if (mlx5_mprq_alloc_mp(dev)) {
 		/* Should not release Rx queues but return immediately. */
@@ -128,32 +121,21 @@ mlx5_rxq_start(struct rte_eth_dev *dev)
 
 		if (!rxq_ctrl)
 			continue;
-		if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
-			rxq_ctrl->obj = mlx5_rxq_obj_new
-				(dev, i, MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN);
-			if (!rxq_ctrl->obj)
+		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
+			/* Pre-register Rx mempool. */
+			mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+			     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
+			DRV_LOG(DEBUG, "port %u Rx queue %u registering mp %s"
+				" having %u chunks", dev->data->port_id,
+				rxq_ctrl->rxq.idx, mp->name, mp->nb_mem_chunks);
+			mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
+			ret = rxq_alloc_elts(rxq_ctrl);
+			if (ret)
 				goto error;
-			continue;
 		}
-		/* Pre-register Rx mempool. */
-		mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
-		     rxq_ctrl->rxq.mprq_mp : rxq_ctrl->rxq.mp;
-		DRV_LOG(DEBUG,
-			"port %u Rx queue %u registering"
-			" mp %s having %u chunks",
-			dev->data->port_id, rxq_ctrl->rxq.idx,
-			mp->name, mp->nb_mem_chunks);
-		mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl, mp);
-		ret = rxq_alloc_elts(rxq_ctrl);
-		if (ret)
-			goto error;
-		rxq_ctrl->obj = mlx5_rxq_obj_new(dev, i, obj_type);
+		rxq_ctrl->obj = priv->obj_ops->rxq_obj_new(dev, i);
 		if (!rxq_ctrl->obj)
 			goto error;
-		if (obj_type == MLX5_RXQ_OBJ_TYPE_IBV)
-			rxq_ctrl->wqn = rxq_ctrl->obj->wq->wq_num;
-		else if (obj_type == MLX5_RXQ_OBJ_TYPE_DEVX_RQ)
-			rxq_ctrl->wqn = rxq_ctrl->obj->rq->id;
 	}
 	return 0;
 error:
@@ -327,8 +309,12 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 			dev->data->port_id);
 		goto error;
 	}
-	/* Set a mask and offset of dynamic metadata flows into Rx queues*/
+	/* Set a mask and offset of dynamic metadata flows into Rx queues. */
 	mlx5_flow_rxq_dynf_metadata_set(dev);
+	/* Set flags and context to convert Rx timestamps. */
+	mlx5_rxq_timestamp_set(dev);
+	/* Set a mask and offset of scheduling on timestamp into Tx queues. */
+	mlx5_txq_dynf_timestamp_set(dev);
 	/*
 	 * In non-cached mode, it only needs to start the default mreg copy
 	 * action and no flow created by application exists anymore.
@@ -344,7 +330,7 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
 	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
 	/* Enable datapath on secondary process. */
-	mlx5_mp_req_start_rxtx(dev);
+	mlx5_mp_os_req_start_rxtx(dev);
 	if (priv->sh->intr_handle.fd >= 0) {
 		priv->sh->port[priv->dev_port - 1].ih_port_id =
 					(uint32_t)dev->data->port_id;
@@ -390,7 +376,7 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	dev->tx_pkt_burst = removed_tx_burst;
 	rte_wmb();
 	/* Disable datapath on secondary process. */
-	mlx5_mp_req_stop_rxtx(dev);
+	mlx5_mp_os_req_stop_rxtx(dev);
 	usleep(1000 * priv->rxqs_n);
 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
 	mlx5_flow_stop_default(dev);
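
Note on the Rx-queue change above: instead of computing an object-type enum (IBV vs. DevX RQ, depending on LRO) and branching on it at every step, mlx5_rxq_start() now creates queue objects through a per-device callback table, priv->obj_ops, which is presumably filled elsewhere when the device is set up (not shown in this diff). A minimal, self-contained C sketch of that ops-table pattern follows; every name in it (dev_ctx, rxq_obj, obj_ops, the verbs_/devx_ constructors) is an illustrative placeholder, not an actual mlx5 symbol.

/* Sketch of per-device ops-table dispatch (hypothetical names, not mlx5's). */
struct dev_ctx;				/* device context, opaque here */

struct rxq_obj {			/* stands in for the HW queue object */
	unsigned int idx;
};

struct obj_ops {
	struct rxq_obj *(*rxq_obj_new)(struct dev_ctx *dev, unsigned int idx);
};

struct dev_ctx {
	const struct obj_ops *obj_ops;	/* backend chosen once per device */
	int have_devx;			/* pretend capability flag */
};

static struct rxq_obj *
verbs_rxq_obj_new(struct dev_ctx *dev, unsigned int idx)
{
	static struct rxq_obj obj;	/* a real backend would create a Verbs WQ here */

	(void)dev;
	obj.idx = idx;
	return &obj;
}

static struct rxq_obj *
devx_rxq_obj_new(struct dev_ctx *dev, unsigned int idx)
{
	static struct rxq_obj obj;	/* a real backend would create a DevX RQ here */

	(void)dev;
	obj.idx = idx;
	return &obj;
}

static const struct obj_ops verbs_obj_ops = { .rxq_obj_new = verbs_rxq_obj_new };
static const struct obj_ops devx_obj_ops = { .rxq_obj_new = devx_rxq_obj_new };

/* Configure time: pick the backend once. */
static void
dev_configure(struct dev_ctx *dev)
{
	dev->obj_ops = dev->have_devx ? &devx_obj_ops : &verbs_obj_ops;
}

/* Start time: the queue-start loop only calls through the table. */
static struct rxq_obj *
start_one_rxq(struct dev_ctx *dev, unsigned int idx)
{
	return dev->obj_ops->rxq_obj_new(dev, idx);
}

With this structure the start path stays identical whichever backend is in use, which is the design choice the hunk replacing mlx5_rxq_obj_new(dev, i, obj_type) with priv->obj_ops->rxq_obj_new(dev, i) reflects.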