#include <mlx5_malloc.h>
#include "mlx5.h"
+#include "mlx5_flow.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_utils.h"
if (!txq_ctrl)
continue;
- if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
+ if (!txq_ctrl->is_hairpin)
txq_alloc_elts(txq_ctrl);
MLX5_ASSERT(!txq_ctrl->obj);
txq_ctrl->obj = mlx5_malloc(flags, sizeof(struct mlx5_txq_obj),
txq_ctrl->obj = NULL;
goto error;
}
- if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
+ if (!txq_ctrl->is_hairpin) {
size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);
txq_data->fcqs = mlx5_malloc(flags, size,
return -rte_errno;
}
-/**
- * Translate the chunk address to MR key in order to put in into the cache.
- */
-static void
-mlx5_rxq_mempool_register_cb(struct rte_mempool *mp, void *opaque,
- struct rte_mempool_memhdr *memhdr,
- unsigned int idx)
-{
- struct mlx5_rxq_data *rxq = opaque;
-
- RTE_SET_USED(mp);
- RTE_SET_USED(idx);
- mlx5_rx_addr2mr(rxq, (uintptr_t)memhdr->addr);
-}
-
/**
* Register Rx queue mempools and fill the Rx queue cache.
* This function tolerates repeated mempool registration.
static int
mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
{
-	struct mlx5_priv *priv = rxq_ctrl->priv;
	struct rte_mempool *mp;
	uint32_t s;
	int ret = 0;
+	/* Discard stale per-queue MR cache entries before repopulating. */
	mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
	/* MPRQ mempool is registered on creation, just fill the cache. */
-	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
-		rte_mempool_mem_iter(rxq_ctrl->rxq.mprq_mp,
-				     mlx5_rxq_mempool_register_cb,
-				     &rxq_ctrl->rxq);
-		return 0;
-	}
+	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
+		return mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+						      rxq_ctrl->rxq.mprq_mp);
+	/* Register every segment mempool, then warm the MR cache from it. */
	for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
+		bool is_extmem;
+
		mp = rxq_ctrl->rxq.rxseg[s].mp;
-		ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
-					       priv->sh->cdev->pd, mp,
-					       &priv->mp_id);
+		/* Pinned external buffers need dedicated MR registration. */
+		is_extmem = (rte_pktmbuf_priv_flags(mp) &
+			     RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
+		ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp,
+					       is_extmem);
+		/* EEXIST means a repeated registration, which is tolerated. */
		if (ret < 0 && rte_errno != EEXIST)
			return ret;
-		rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
-				     &rxq_ctrl->rxq);
+		ret = mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
+						     mp);
+		if (ret < 0)
+			return ret;
	}
	return 0;
}
mlx5_rxq_release(dev, i);
}
+/**
+ * Prepare an Rx queue control structure for starting.
+ *
+ * For regular (non-hairpin) queues, registers the Rx mempools and
+ * allocates the queue elements; in all cases allocates the zeroed
+ * queue object placeholder that the DevX/Verbs backend fills in later.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param rxq_ctrl
+ *   Pointer to Rx queue control structure.
+ * @param idx
+ *   Queue index in port, used only for logging.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
+		      unsigned int idx)
+{
+	int ret = 0;
+
+	if (!rxq_ctrl->is_hairpin) {
+		/*
+		 * Pre-register the mempools. Regardless of whether
+		 * the implicit registration is enabled or not,
+		 * Rx mempool destruction is tracked to free MRs.
+		 */
+		if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
+			return -rte_errno;
+		ret = rxq_alloc_elts(rxq_ctrl);
+		if (ret)
+			return ret;
+	}
+	/* The object must not have survived a previous start. */
+	MLX5_ASSERT(!rxq_ctrl->obj);
+	rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+				    sizeof(*rxq_ctrl->obj), 0,
+				    rxq_ctrl->socket);
+	if (!rxq_ctrl->obj) {
+		DRV_LOG(ERR, "Port %u Rx queue %u can't allocate resources.",
+			dev->data->port_id, idx);
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.", dev->data->port_id,
+		idx, (void *)&rxq_ctrl->obj);
+	return 0;
+}
+
/**
* Start traffic on Rx queues.
*
/* Should not release Rx queues but return immediately. */
return -rte_errno;
}
- DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
- dev->data->port_id, priv->sh->device_attr.max_qp_wr);
- DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
- dev->data->port_id, priv->sh->device_attr.max_sge);
+ DRV_LOG(DEBUG, "Port %u dev_cap.max_qp_wr is %d.",
+ dev->data->port_id, priv->sh->dev_cap.max_qp_wr);
+ DRV_LOG(DEBUG, "Port %u dev_cap.max_sge is %d.",
+ dev->data->port_id, priv->sh->dev_cap.max_sge);
for (i = 0; i != priv->rxqs_n; ++i) {
- struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i);
+ struct mlx5_rxq_ctrl *rxq_ctrl;
- if (!rxq_ctrl)
+ if (rxq == NULL)
continue;
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
- /*
- * Pre-register the mempools. Regardless of whether
- * the implicit registration is enabled or not,
- * Rx mempool destruction is tracked to free MRs.
- */
- if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
- goto error;
- ret = rxq_alloc_elts(rxq_ctrl);
- if (ret)
+ rxq_ctrl = rxq->ctrl;
+ if (!rxq_ctrl->started) {
+ if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0)
goto error;
+ LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
}
- MLX5_ASSERT(!rxq_ctrl->obj);
- rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
- sizeof(*rxq_ctrl->obj), 0,
- rxq_ctrl->socket);
- if (!rxq_ctrl->obj) {
- DRV_LOG(ERR,
- "Port %u Rx queue %u can't allocate resources.",
- dev->data->port_id, (*priv->rxqs)[i]->idx);
- rte_errno = ENOMEM;
- goto error;
- }
- ret = priv->obj_ops.rxq_obj_new(dev, i);
+ ret = priv->obj_ops.rxq_obj_new(rxq);
if (ret) {
mlx5_free(rxq_ctrl->obj);
+ rxq_ctrl->obj = NULL;
goto error;
}
- DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
- dev->data->port_id, i, (void *)&rxq_ctrl->obj);
- LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
+ rxq_ctrl->started = true;
}
return 0;
error:
struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
struct mlx5_txq_ctrl *txq_ctrl;
+ struct mlx5_rxq_priv *rxq;
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_devx_obj *sq;
struct mlx5_devx_obj *rq;
txq_ctrl = mlx5_txq_get(dev, i);
if (!txq_ctrl)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+ if (!txq_ctrl->is_hairpin ||
txq_ctrl->hairpin_conf.peers[0].port != self_port) {
mlx5_txq_release(dev, i);
continue;
if (!txq_ctrl)
continue;
/* Skip hairpin queues with other peer ports. */
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
+ if (!txq_ctrl->is_hairpin ||
txq_ctrl->hairpin_conf.peers[0].port != self_port) {
mlx5_txq_release(dev, i);
continue;
return -rte_errno;
}
sq = txq_ctrl->obj->sq;
- rxq_ctrl = mlx5_rxq_get(dev,
- txq_ctrl->hairpin_conf.peers[0].queue);
- if (!rxq_ctrl) {
+ rxq = mlx5_rxq_get(dev, txq_ctrl->hairpin_conf.peers[0].queue);
+ if (rxq == NULL) {
mlx5_txq_release(dev, i);
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u no rxq object found: %d",
txq_ctrl->hairpin_conf.peers[0].queue);
return -rte_errno;
}
- if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
- rxq_ctrl->hairpin_conf.peers[0].queue != i) {
+ rxq_ctrl = rxq->ctrl;
+ if (!rxq_ctrl->is_hairpin ||
+ rxq->hairpin_conf.peers[0].queue != i) {
rte_errno = ENOMEM;
DRV_LOG(ERR, "port %u Tx queue %d can't be binded to "
"Rx queue %d", dev->data->port_id,
sq_attr.state = MLX5_SQC_STATE_RDY;
sq_attr.sq_state = MLX5_SQC_STATE_RST;
sq_attr.hairpin_peer_rq = rq->id;
- sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
+ sq_attr.hairpin_peer_vhca =
+ priv->sh->cdev->config.hca_attr.vhca_id;
ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
if (ret)
goto error;
rq_attr.state = MLX5_SQC_STATE_RDY;
rq_attr.rq_state = MLX5_SQC_STATE_RST;
rq_attr.hairpin_peer_sq = sq->id;
- rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
+ rq_attr.hairpin_peer_vhca =
+ priv->sh->cdev->config.hca_attr.vhca_id;
ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
if (ret)
goto error;
/* Qs with auto-bind will be destroyed directly. */
- rxq_ctrl->hairpin_status = 1;
+ rxq->hairpin_status = 1;
txq_ctrl->hairpin_status = 1;
mlx5_txq_release(dev, i);
- mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
}
return 0;
error:
mlx5_txq_release(dev, i);
- mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
return -rte_errno;
}
dev->data->port_id, peer_queue);
return -rte_errno;
}
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d is not a hairpin Txq",
dev->data->port_id, peer_queue);
return -rte_errno;
}
peer_info->qp_id = txq_ctrl->obj->sq->id;
- peer_info->vhca_id = priv->config.hca_attr.vhca_id;
+ peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
/* 1-to-1 mapping, only the first one is used. */
peer_info->peer_q = txq_ctrl->hairpin_conf.peers[0].queue;
peer_info->tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
peer_info->manual_bind = txq_ctrl->hairpin_conf.manual_bind;
mlx5_txq_release(dev, peer_queue);
} else { /* Peer port used as ingress. */
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, peer_queue);
struct mlx5_rxq_ctrl *rxq_ctrl;
- rxq_ctrl = mlx5_rxq_get(dev, peer_queue);
- if (rxq_ctrl == NULL) {
+ if (rxq == NULL) {
rte_errno = EINVAL;
DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
dev->data->port_id, peer_queue);
return -rte_errno;
}
- if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
+ rxq_ctrl = rxq->ctrl;
+ if (!rxq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d is not a hairpin Rxq",
dev->data->port_id, peer_queue);
- mlx5_rxq_release(dev, peer_queue);
return -rte_errno;
}
if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
rte_errno = ENOMEM;
DRV_LOG(ERR, "port %u no Rxq object found: %d",
dev->data->port_id, peer_queue);
- mlx5_rxq_release(dev, peer_queue);
return -rte_errno;
}
peer_info->qp_id = rxq_ctrl->obj->rq->id;
- peer_info->vhca_id = priv->config.hca_attr.vhca_id;
- peer_info->peer_q = rxq_ctrl->hairpin_conf.peers[0].queue;
- peer_info->tx_explicit = rxq_ctrl->hairpin_conf.tx_explicit;
- peer_info->manual_bind = rxq_ctrl->hairpin_conf.manual_bind;
- mlx5_rxq_release(dev, peer_queue);
+ peer_info->vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
+ peer_info->peer_q = rxq->hairpin_conf.peers[0].queue;
+ peer_info->tx_explicit = rxq->hairpin_conf.tx_explicit;
+ peer_info->manual_bind = rxq->hairpin_conf.manual_bind;
}
return 0;
}
dev->data->port_id, cur_queue);
return -rte_errno;
}
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
dev->data->port_id, cur_queue);
txq_ctrl->hairpin_status = 1;
mlx5_txq_release(dev, cur_queue);
} else {
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
- rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
- if (rxq_ctrl == NULL) {
+ if (rxq == NULL) {
rte_errno = EINVAL;
DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
dev->data->port_id, cur_queue);
return -rte_errno;
}
- if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
+ rxq_ctrl = rxq->ctrl;
+ if (!rxq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
rte_errno = ENOMEM;
DRV_LOG(ERR, "port %u no Rxq object found: %d",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
- if (rxq_ctrl->hairpin_status != 0) {
+ if (rxq->hairpin_status != 0) {
DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return 0;
}
if (peer_info->tx_explicit !=
- rxq_ctrl->hairpin_conf.tx_explicit) {
+ rxq->hairpin_conf.tx_explicit) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
" mismatch", dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
if (peer_info->manual_bind !=
- rxq_ctrl->hairpin_conf.manual_bind) {
+ rxq->hairpin_conf.manual_bind) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
" mismatch", dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
rq_attr.state = MLX5_SQC_STATE_RDY;
rq_attr.hairpin_peer_vhca = peer_info->vhca_id;
ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
if (ret == 0)
- rxq_ctrl->hairpin_status = 1;
- mlx5_rxq_release(dev, cur_queue);
+ rxq->hairpin_status = 1;
}
return ret;
}
dev->data->port_id, cur_queue);
return -rte_errno;
}
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
dev->data->port_id, cur_queue);
txq_ctrl->hairpin_status = 0;
mlx5_txq_release(dev, cur_queue);
} else {
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
- rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
- if (rxq_ctrl == NULL) {
+ if (rxq == NULL) {
rte_errno = EINVAL;
DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
dev->data->port_id, cur_queue);
return -rte_errno;
}
- if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
+ rxq_ctrl = rxq->ctrl;
+ if (!rxq_ctrl->is_hairpin) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
- if (rxq_ctrl->hairpin_status == 0) {
+ if (rxq->hairpin_status == 0) {
DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return 0;
}
if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
rte_errno = ENOMEM;
DRV_LOG(ERR, "port %u no Rxq object found: %d",
dev->data->port_id, cur_queue);
- mlx5_rxq_release(dev, cur_queue);
return -rte_errno;
}
rq_attr.state = MLX5_SQC_STATE_RST;
rq_attr.rq_state = MLX5_SQC_STATE_RST;
ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
if (ret == 0)
- rxq_ctrl->hairpin_status = 0;
- mlx5_rxq_release(dev, cur_queue);
+ rxq->hairpin_status = 0;
}
return ret;
}
txq_ctrl = mlx5_txq_get(dev, i);
if (txq_ctrl == NULL)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
mlx5_txq_release(dev, i);
continue;
}
txq_ctrl = mlx5_txq_get(dev, i);
if (txq_ctrl == NULL)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
mlx5_txq_release(dev, i);
continue;
}
/* Pass TxQ's information to peer RxQ and try binding. */
cur.peer_q = rx_queue;
cur.qp_id = txq_ctrl->obj->sq->id;
- cur.vhca_id = priv->config.hca_attr.vhca_id;
+ cur.vhca_id = priv->sh->cdev->config.hca_attr.vhca_id;
cur.tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
cur.manual_bind = txq_ctrl->hairpin_conf.manual_bind;
/*
/*
* Unbind the hairpin port pair, HW configuration of both devices will be clear
- * and status will be reset for all the queues used between the them.
+ * and status will be reset for all the queues used between them.
* This function only supports to unbind the Tx from one Rx.
*
* @param dev
txq_ctrl = mlx5_txq_get(dev, i);
if (txq_ctrl == NULL)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
mlx5_txq_release(dev, i);
continue;
}
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_txq_ctrl *txq_ctrl;
- struct mlx5_rxq_ctrl *rxq_ctrl;
uint32_t i;
uint16_t pp;
uint32_t bits[(RTE_MAX_ETHPORTS + 31) / 32] = {0};
txq_ctrl = mlx5_txq_get(dev, i);
if (!txq_ctrl)
continue;
- if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
+ if (!txq_ctrl->is_hairpin) {
mlx5_txq_release(dev, i);
continue;
}
}
} else {
for (i = 0; i < priv->rxqs_n; i++) {
- rxq_ctrl = mlx5_rxq_get(dev, i);
- if (!rxq_ctrl)
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ if (rxq == NULL)
continue;
- if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
- mlx5_rxq_release(dev, i);
+ rxq_ctrl = rxq->ctrl;
+ if (!rxq_ctrl->is_hairpin)
continue;
- }
- pp = rxq_ctrl->hairpin_conf.peers[0].port;
+ pp = rxq->hairpin_conf.peers[0].port;
if (pp >= RTE_MAX_ETHPORTS) {
rte_errno = ERANGE;
- mlx5_rxq_release(dev, i);
DRV_LOG(ERR, "port %hu queue %u peer port "
"out of range %hu",
priv->dev_data->port_id, i, pp);
return -rte_errno;
}
bits[pp / 32] |= 1 << (pp % 32);
- mlx5_rxq_release(dev, i);
}
}
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
dev->data->port_id, strerror(rte_errno));
goto error;
}
- if ((priv->sh->devx && priv->config.dv_flow_en &&
- priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
+ if (mlx5_devx_obj_ops_en(priv->sh) &&
+ priv->obj_ops.lb_dummy_queue_create) {
ret = priv->obj_ops.lb_dummy_queue_create(dev);
if (ret)
goto error;
dev->data->port_id, strerror(rte_errno));
goto error;
}
+ if (priv->config.std_delay_drop || priv->config.hp_delay_drop) {
+ if (!priv->sh->dev_cap.vf && !priv->sh->dev_cap.sf &&
+ !priv->representor) {
+ ret = mlx5_get_flag_dropless_rq(dev);
+ if (ret < 0)
+ DRV_LOG(WARNING,
+ "port %u cannot query dropless flag",
+ dev->data->port_id);
+ else if (!ret)
+ DRV_LOG(WARNING,
+ "port %u dropless_rq OFF, no rearming",
+ dev->data->port_id);
+ } else {
+ DRV_LOG(DEBUG,
+ "port %u doesn't support dropless_rq flag",
+ dev->data->port_id);
+ }
+ }
ret = mlx5_rxq_start(dev);
if (ret) {
DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
goto error;
}
mlx5_os_stats_init(dev);
+ /*
+ * Attach indirection table objects detached on port stop.
+ * They may be needed to create RSS in non-isolated mode.
+ */
+ ret = mlx5_action_handle_attach(dev);
+ if (ret) {
+ DRV_LOG(ERR,
+ "port %u failed to attach indirect actions: %s",
+ dev->data->port_id, rte_strerror(rte_errno));
+ goto error;
+ }
ret = mlx5_traffic_enable(dev);
if (ret) {
DRV_LOG(ERR, "port %u failed to set defaults flows",
dev->rx_pkt_burst = mlx5_select_rx_function(dev);
/* Enable datapath on secondary process. */
mlx5_mp_os_req_start_rxtx(dev);
- if (priv->sh->intr_handle.fd >= 0) {
+ if (rte_intr_fd_get(priv->sh->intr_handle) >= 0) {
priv->sh->port[priv->dev_port - 1].ih_port_id =
(uint32_t)dev->data->port_id;
} else {
- DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
+ DRV_LOG(INFO, "port %u starts without RMV interrupts.",
dev->data->port_id);
- dev->data->dev_conf.intr_conf.lsc = 0;
dev->data->dev_conf.intr_conf.rmv = 0;
}
- if (priv->sh->intr_handle_devx.fd >= 0)
+ if (rte_intr_fd_get(priv->sh->intr_handle_nl) >= 0) {
+ priv->sh->port[priv->dev_port - 1].nl_ih_port_id =
+ (uint32_t)dev->data->port_id;
+ } else {
+ DRV_LOG(INFO, "port %u starts without LSC interrupts.",
+ dev->data->port_id);
+ dev->data->dev_conf.intr_conf.lsc = 0;
+ }
+ if (rte_intr_fd_get(priv->sh->intr_handle_devx) >= 0)
priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
(uint32_t)dev->data->port_id;
return 0;
dev->data->dev_started = 0;
/* Prevent crashes when queues are still in use. */
- dev->rx_pkt_burst = removed_rx_burst;
- dev->tx_pkt_burst = removed_tx_burst;
+ dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
+ dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
rte_wmb();
/* Disable datapath on secondary process. */
mlx5_mp_os_req_stop_rxtx(dev);
/* All RX queue flags will be cleared in the flush interface. */
mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
mlx5_flow_meter_rxq_flush(dev);
+ mlx5_action_handle_detach(dev);
mlx5_rx_intr_vec_disable(dev);
priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
+ priv->sh->port[priv->dev_port - 1].nl_ih_port_id = RTE_MAX_ETHPORTS;
mlx5_txq_stop(dev);
mlx5_rxq_stop(dev);
if (priv->obj_ops.lb_dummy_queue_release)
* Enable traffic flows configured by control plane
*
* @param dev
- * Pointer to Ethernet device private data.
- * @param dev
* Pointer to Ethernet device structure.
*
* @return
if (!txq_ctrl)
continue;
/* Only Tx implicit mode requires the default Tx flow. */
- if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN &&
+ if (txq_ctrl->is_hairpin &&
txq_ctrl->hairpin_conf.tx_explicit == 0 &&
txq_ctrl->hairpin_conf.peers[0].port ==
priv->dev_data->port_id) {
goto error;
}
}
+ if (priv->sh->config.dv_esw_en) {
+ if (mlx5_flow_create_devx_sq_miss_flow(dev, i) == 0) {
+ DRV_LOG(ERR,
+ "Port %u Tx queue %u SQ create representor devx default miss rule failed.",
+ dev->data->port_id, i);
+ goto error;
+ }
+ }
mlx5_txq_release(dev, i);
}
- if (priv->config.dv_esw_en && !priv->config.vf && !priv->config.sf) {
+ if (priv->sh->config.dv_esw_en) {
if (mlx5_flow_create_esw_table_zero_flow(dev))
priv->fdb_def_rule = 1;
else
" configured - only Eswitch group 0 flows are"
" supported.", dev->data->port_id);
}
- if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
+ if (!priv->sh->config.lacp_by_user && priv->pf_bond >= 0) {
ret = mlx5_flow_lacp_miss(dev);
if (ret)
DRV_LOG(INFO, "port %u LACP rule cannot be created - "