X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_trigger.c;h=ae7fcca229f84d1aab8e761494bc188f0ee7e7e4;hb=835731f63b0a89deedc6878a7028844b643fb54e;hp=cba736b4bc0bcbc258a1aae0f3da7fb75a964611;hpb=02109eaeacceeb8b2ea6fadea33bf334a805fa8e;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index cba736b4bc..ae7fcca229 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -6,15 +6,17 @@
 #include 
 
 #include 
-#include 
+#include 
 #include 
 #include 
+#include 
 
 #include 
 
 #include "mlx5.h"
 #include "mlx5_mr.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_rx.h"
+#include "mlx5_tx.h"
 #include "mlx5_utils.h"
 #include "rte_pmd_mlx5.h"
 
@@ -77,6 +79,7 @@ mlx5_txq_start(struct rte_eth_dev *dev)
 		}
 		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
 			size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
+
 			txq_data->fcqs = mlx5_malloc(flags, size,
 						     RTE_CACHE_LINE_SIZE,
 						     txq_ctrl->socket);
@@ -218,6 +221,8 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 	struct mlx5_devx_obj *rq;
 	unsigned int i;
 	int ret = 0;
+	bool need_auto = false;
+	uint16_t self_port = dev->data->port_id;
 
 	for (i = 0; i != priv->txqs_n; ++i) {
 		txq_ctrl = mlx5_txq_get(dev, i);
@@ -227,6 +232,28 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 			mlx5_txq_release(dev, i);
 			continue;
 		}
+		if (txq_ctrl->hairpin_conf.peers[0].port != self_port)
+			continue;
+		if (txq_ctrl->hairpin_conf.manual_bind) {
+			mlx5_txq_release(dev, i);
+			return 0;
+		}
+		need_auto = true;
+		mlx5_txq_release(dev, i);
+	}
+	if (!need_auto)
+		return 0;
+	for (i = 0; i != priv->txqs_n; ++i) {
+		txq_ctrl = mlx5_txq_get(dev, i);
+		if (!txq_ctrl)
+			continue;
+		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+			mlx5_txq_release(dev, i);
+			continue;
+		}
+		/* Skip hairpin queues with other peer ports. */
+		if (txq_ctrl->hairpin_conf.peers[0].port != self_port)
+			continue;
 		if (!txq_ctrl->obj) {
 			rte_errno = ENOMEM;
 			DRV_LOG(ERR, "port %u no txq object found: %d",
@@ -275,6 +302,9 @@ mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
 		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
 		if (ret)
 			goto error;
+		/* Qs with auto-bind will be destroyed directly. */
+		rxq_ctrl->hairpin_status = 1;
+		txq_ctrl->hairpin_status = 1;
 		mlx5_txq_release(dev, i);
 		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
 	}
@@ -906,7 +936,7 @@ mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
 				return ret;
 		}
 	else
-		ret = mlx5_hairpin_bind_single_port(dev, rx_port);
+		ret = mlx5_hairpin_unbind_single_port(dev, rx_port);
 	return ret;
 }
 
@@ -1038,6 +1068,12 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
+	if ((priv->config.devx && priv->config.dv_flow_en &&
+	    priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
+		ret = priv->obj_ops.lb_dummy_queue_create(dev);
+		if (ret)
+			goto error;
+	}
 	ret = mlx5_txq_start(dev);
 	if (ret) {
 		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
@@ -1050,9 +1086,13 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
+	/*
+	 * Such step will be skipped if there is no hairpin TX queue configured
+	 * with RX peer queue from the same device.
+	 */
 	ret = mlx5_hairpin_auto_bind(dev);
 	if (ret) {
-		DRV_LOG(ERR, "port %u hairpin binding failed: %s",
+		DRV_LOG(ERR, "port %u hairpin auto binding failed: %s",
 			dev->data->port_id, strerror(rte_errno));
 		goto error;
 	}
@@ -1114,6 +1154,8 @@ error:
 	mlx5_traffic_disable(dev);
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
+	if (priv->obj_ops.lb_dummy_queue_release)
+		priv->obj_ops.lb_dummy_queue_release(dev);
 	mlx5_txpp_stop(dev); /* Stop last. */
 	rte_errno = ret; /* Restore rte_errno. */
 	return -rte_errno;
@@ -1139,18 +1181,21 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	rte_wmb();
 	/* Disable datapath on secondary process. */
 	mlx5_mp_os_req_stop_rxtx(dev);
-	usleep(1000 * priv->rxqs_n);
+	rte_delay_us_sleep(1000 * priv->rxqs_n);
 	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
 	mlx5_flow_stop_default(dev);
 	/* Control flows for default traffic can be removed firstly. */
 	mlx5_traffic_disable(dev);
 	/* All RX queue flags will be cleared in the flush interface. */
 	mlx5_flow_list_flush(dev, &priv->flows, true);
+	mlx5_flow_meter_rxq_flush(dev);
 	mlx5_rx_intr_vec_disable(dev);
 	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
 	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
 	mlx5_txq_stop(dev);
 	mlx5_rxq_stop(dev);
+	if (priv->obj_ops.lb_dummy_queue_release)
+		priv->obj_ops.lb_dummy_queue_release(dev);
 	mlx5_txpp_stop(dev);
 
 	return 0;
@@ -1203,7 +1248,11 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
 		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
 		if (!txq_ctrl)
 			continue;
-		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
+		/* Only Tx implicit mode requires the default Tx flow. */
+		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN &&
+		    txq_ctrl->hairpin_conf.tx_explicit == 0 &&
+		    txq_ctrl->hairpin_conf.peers[0].port ==
+		    priv->dev_data->port_id) {
 			ret = mlx5_ctrl_flow_source_queue(dev, i);
 			if (ret) {
 				mlx5_txq_release(dev, i);
@@ -1279,8 +1328,12 @@ mlx5_traffic_enable(struct rte_eth_dev *dev)
 				goto error;
 			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
 					     &ipv6_multi_mask);
-			if (ret)
-				goto error;
+			if (ret) {
+				/* Do not fail on IPv6 broadcast creation failure. */
+				DRV_LOG(WARNING,
+					"IPv6 broadcast is not supported");
+				ret = 0;
+			}
 		}
 	}
 	/* Add MAC address flows. */
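
Note: the conditional auto-bind path in this diff only runs for hairpin Tx queues whose peer is the same port and which were left in automatic bind mode; queues marked manual_bind or peering with another port are skipped, and per the new comment only Tx implicit mode gets the default Tx flow in mlx5_traffic_enable(). Below is a minimal application-side sketch of a configuration that would exercise that automatic path. It is illustrative only and not part of the patch: the function name, queue indices and descriptor count are assumptions, while the rte_eth_rx/tx_hairpin_queue_setup() calls and struct rte_eth_hairpin_conf fields are the standard DPDK API.

#include <rte_ethdev.h>

/*
 * Illustrative sketch: set up a single-port hairpin Rx/Tx pair that relies
 * on the PMD's automatic bind (manual_bind == 0) and implicit Tx flow
 * (tx_explicit == 0), i.e. the case handled by mlx5_hairpin_auto_bind()
 * when rte_eth_dev_start() is called.
 */
static int
setup_auto_bound_hairpin(uint16_t port_id, uint16_t rxq, uint16_t txq,
			 uint16_t nb_desc)
{
	struct rte_eth_hairpin_conf conf = {
		.peer_count = 1,
		.tx_explicit = 0,	/* PMD installs the default Tx flow */
		.manual_bind = 0,	/* PMD binds the pair at device start */
	};
	int ret;

	/* The Rx hairpin queue names its peer Tx queue on the same port. */
	conf.peers[0].port = port_id;
	conf.peers[0].queue = txq;
	ret = rte_eth_rx_hairpin_queue_setup(port_id, rxq, nb_desc, &conf);
	if (ret != 0)
		return ret;
	/* The Tx hairpin queue points back at the Rx queue. */
	conf.peers[0].queue = rxq;
	return rte_eth_tx_hairpin_queue_setup(port_id, txq, nb_desc, &conf);
}

With manual_bind set to 1 the PMD skips the automatic step and the application binds and unbinds the pair itself through rte_eth_hairpin_bind()/rte_eth_hairpin_unbind(); the unbind side is the path corrected by the mlx5_hairpin_unbind_single_port() fix in this diff.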