/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"
/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}
/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
		struct mlx5_txq_data *txq_data = &txq_ctrl->txq;
		uint32_t flags = MLX5_MEM_RTE | MLX5_MEM_ZERO;

		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
			txq_alloc_elts(txq_ctrl);
		MLX5_ASSERT(!txq_ctrl->obj);
		txq_ctrl->obj = mlx5_malloc(flags, sizeof(struct mlx5_txq_obj),
					    0, txq_ctrl->socket);
		if (!txq_ctrl->obj) {
			DRV_LOG(ERR, "Port %u Tx queue %u cannot allocate "
				"memory resources.", dev->data->port_id,
				txq_data->idx);
			rte_errno = ENOMEM;
			goto error;
		}
		ret = priv->obj_ops.txq_obj_new(dev, i);
		if (ret < 0) {
			mlx5_free(txq_ctrl->obj);
			txq_ctrl->obj = NULL;
			goto error;
		}
		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
			size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);

			txq_data->fcqs = mlx5_malloc(flags, size,
						     RTE_CACHE_LINE_SIZE,
						     txq_ctrl->socket);
			if (!txq_data->fcqs) {
				DRV_LOG(ERR, "Port %u Tx queue %u cannot "
					"allocate memory (FCQ).",
					dev->data->port_id, i);
				rte_errno = ENOMEM;
				goto error;
			}
		}
		DRV_LOG(DEBUG, "Port %u txq %u updated with %p.",
			dev->data->port_id, i, (void *)&txq_ctrl->obj);
		LIST_INSERT_HEAD(&priv->txqsobj, txq_ctrl->obj, next);
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_txq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
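/*
 * Note on the cleanup idiom above (a reading aid, not driver logic):
 * mlx5_txq_get() takes a reference on the queue control structure, so every
 * exit path pairs it with mlx5_txq_release(). On failure the loop index `i`
 * names the queue that failed, and the do/while walks back from i to 0 in
 * reverse order; rte_errno is saved first because the release calls may
 * clobber it on the way down.
 */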
/**
 * Register Rx queue mempools and fill the Rx queue cache.
 * This function tolerates repeated mempool registration.
 *
 * @param[in] rxq_ctrl
 *   Rx queue control data.
 *
 * @return
 *   0 on success, (-1) on failure and rte_errno is set.
 */
static int
mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct rte_mempool *mp;
	uint32_t s;
	int ret = 0;

	mlx5_mr_flush_local_cache(&rxq_ctrl->rxq.mr_ctrl);
	/* MPRQ mempool is registered on creation, just fill the cache. */
	if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
		return mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
						      rxq_ctrl->rxq.mprq_mp);
	for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
		bool is_extmem;

		mp = rxq_ctrl->rxq.rxseg[s].mp;
		is_extmem = (rte_pktmbuf_priv_flags(mp) &
			     RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) != 0;
		ret = mlx5_mr_mempool_register(rxq_ctrl->sh->cdev, mp,
					       is_extmem);
		if (ret < 0 && rte_errno != EEXIST)
			return ret;
		ret = mlx5_mr_mempool_populate_cache(&rxq_ctrl->rxq.mr_ctrl,
						     mp);
		if (ret < 0)
			return ret;
	}
	return 0;
}
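/*
 * Tolerating EEXIST above is what makes the registration idempotent, as the
 * function comment promises: the same mempool may back several Rx queues or
 * be re-registered across a port restart, and only the first registration
 * actually creates the memory regions (MRs); later calls only need the
 * per-queue MR cache to be populated.
 */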
/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}
/**
 * Prepare a single Rx queue: register its mempools, allocate the elements
 * and create the queue object placeholder.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param idx
 *   Queue index.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ctrl_prepare(struct rte_eth_dev *dev, struct mlx5_rxq_ctrl *rxq_ctrl,
		      unsigned int idx)
{
	int ret = 0;

	if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
		/*
		 * Pre-register the mempools. Regardless of whether
		 * the implicit registration is enabled or not,
		 * Rx mempool destruction is tracked to free MRs.
		 */
		if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
			return -rte_errno;
		ret = rxq_alloc_elts(rxq_ctrl);
		if (ret)
			return ret;
	}
	MLX5_ASSERT(!rxq_ctrl->obj);
	rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				    sizeof(*rxq_ctrl->obj), 0,
				    rxq_ctrl->socket);
	if (!rxq_ctrl->obj) {
		DRV_LOG(ERR, "Port %u Rx queue %u can't allocate resources.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.", dev->data->port_id,
		idx, (void *)&rxq_ctrl->obj);
	return 0;
}
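/*
 * Design note: the queue object is allocated zeroed and on the queue's own
 * NUMA socket (rxq_ctrl->socket), so the control structures live in memory
 * local to the core that will drive this queue's datapath.
 */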
/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;

	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
	if (mlx5_mprq_alloc_mp(dev)) {
		/* Should not release Rx queues but return immediately. */
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
	DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
		dev->data->port_id, priv->sh->device_attr.max_sge);
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, i);
		struct mlx5_rxq_ctrl *rxq_ctrl;

		if (rxq == NULL)
			continue;
		rxq_ctrl = rxq->ctrl;
		if (!rxq_ctrl->started) {
			if (mlx5_rxq_ctrl_prepare(dev, rxq_ctrl, i) < 0)
				goto error;
			LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
		}
		ret = priv->obj_ops.rxq_obj_new(rxq);
		if (ret) {
			mlx5_free(rxq_ctrl->obj);
			rxq_ctrl->obj = NULL;
			goto error;
		}
		rxq_ctrl->started = true;
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_rxq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
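/*
 * Ordering note for the function above: the MPRQ (Multi-Packet RQ) mempool
 * is allocated once per port, before the per-queue loop, because it can be
 * shared and resized across all Rx queues; per-queue preparation afterwards
 * only fills MR caches and creates the queue objects.
 */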
/**
 * Binds Tx queues to Rx queues for hairpin.
 *
 * Binds Tx queues to the target Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_priv *rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_devx_obj *sq;
	struct mlx5_devx_obj *rq;
	unsigned int i;
	int ret = 0;
	bool need_auto = false;
	uint16_t self_port = dev->data->port_id;

	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
		    txq_ctrl->hairpin_conf.peers[0].port != self_port) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (txq_ctrl->hairpin_conf.manual_bind) {
			mlx5_txq_release(dev, i);
			return 0;
		}
		need_auto = true;
		mlx5_txq_release(dev, i);
	}
	if (!need_auto)
		return 0;
	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		/* Skip hairpin queues with other peer ports. */
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
		    txq_ctrl->hairpin_conf.peers[0].port != self_port) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no txq object found: %d",
				dev->data->port_id, i);
			mlx5_txq_release(dev, i);
			return -rte_errno;
		}
		sq = txq_ctrl->obj->sq;
		rxq = mlx5_rxq_get(dev, txq_ctrl->hairpin_conf.peers[0].queue);
		if (rxq == NULL) {
			mlx5_txq_release(dev, i);
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u no rxq object found: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			return -rte_errno;
		}
		rxq_ctrl = rxq->ctrl;
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
		    rxq->hairpin_conf.peers[0].queue != i) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %d can't be bound to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		rq = rxq_ctrl->obj->rq;
		if (!rq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = rq->id;
		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
		if (ret)
			goto error;
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = sq->id;
		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
		if (ret)
			goto error;
		/* Qs with auto-bind will be destroyed directly. */
		rxq->hairpin_status = 1;
		txq_ctrl->hairpin_status = 1;
		mlx5_txq_release(dev, i);
	}
	return 0;
error:
	mlx5_txq_release(dev, i);
	return -rte_errno;
}
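/*
 * The auto-bind above drives both queue objects of a hairpin pair through
 * the same firmware state transition, each side naming its peer:
 *
 *     SQ: RST -> RDY  with { hairpin_peer_rq = rq->id, peer vhca_id }
 *     RQ: RST -> RDY  with { hairpin_peer_sq = sq->id, peer vhca_id }
 *
 * Both modify commands must succeed for the pair to be usable; a failure
 * leaves via the error label with the Tx queue reference dropped.
 */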
/**
 * Fetch the peer queue's SW & HW information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param peer_queue
 *   Index of the queue to fetch the information.
 * @param current_info
 *   Pointer to the input peer information, not used currently.
 * @param peer_info
 *   Pointer to the structure to store the information, output.
 * @param direction
 *   Positive to get the RxQ information, zero to get the TxQ information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
			       struct rte_hairpin_peer_info *current_info,
			       struct rte_hairpin_peer_info *peer_info,
			       uint32_t direction)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	RTE_SET_USED(current_info);

	if (dev->data->dev_started == 0) {
		rte_errno = EBUSY;
		DRV_LOG(ERR, "peer port %u is not started",
			dev->data->port_id);
		return -rte_errno;
	}
	/*
	 * Peer port used as egress. In the current design, hairpin Tx queue
	 * will be bound to the peer Rx queue. Indeed, only the information of
	 * peer Rx queue needs to be fetched.
	 */
	if (direction == 0) {
		struct mlx5_txq_ctrl *txq_ctrl;

		txq_ctrl = mlx5_txq_get(dev, peer_queue);
		if (txq_ctrl == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
				dev->data->port_id, peer_queue);
			return -rte_errno;
		}
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d is not a hairpin Txq",
				dev->data->port_id, peer_queue);
			mlx5_txq_release(dev, peer_queue);
			return -rte_errno;
		}
		if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Txq object found: %d",
				dev->data->port_id, peer_queue);
			mlx5_txq_release(dev, peer_queue);
			return -rte_errno;
		}
		peer_info->qp_id = txq_ctrl->obj->sq->id;
		peer_info->vhca_id = priv->config.hca_attr.vhca_id;
		/* 1-to-1 mapping, only the first one is used. */
		peer_info->peer_q = txq_ctrl->hairpin_conf.peers[0].queue;
		peer_info->tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
		peer_info->manual_bind = txq_ctrl->hairpin_conf.manual_bind;
		mlx5_txq_release(dev, peer_queue);
	} else { /* Peer port used as ingress. */
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, peer_queue);
		struct mlx5_rxq_ctrl *rxq_ctrl;

		if (rxq == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
				dev->data->port_id, peer_queue);
			return -rte_errno;
		}
		rxq_ctrl = rxq->ctrl;
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d is not a hairpin Rxq",
				dev->data->port_id, peer_queue);
			return -rte_errno;
		}
		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Rxq object found: %d",
				dev->data->port_id, peer_queue);
			return -rte_errno;
		}
		peer_info->qp_id = rxq_ctrl->obj->rq->id;
		peer_info->vhca_id = priv->config.hca_attr.vhca_id;
		peer_info->peer_q = rxq->hairpin_conf.peers[0].queue;
		peer_info->tx_explicit = rxq->hairpin_conf.tx_explicit;
		peer_info->manual_bind = rxq->hairpin_conf.manual_bind;
	}
	return 0;
}
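/*
 * Direction convention used by the peer_* helpers: in the bind/unbind
 * functions below a non-zero `direction` operates on the Tx side of the
 * pair and zero on the Rx side. mlx5_hairpin_queue_peer_update() above is
 * the mirror image because it runs on the *peer* port: direction == 0
 * fetches Tx queue info, direction != 0 fetches Rx queue info.
 */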
/**
 * Bind the hairpin queue with the peer HW information.
 * This needs to be called twice, both for the Tx and Rx queues of a pair.
 * If the queue is already bound, it is considered successful.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param cur_queue
 *   Index of the queue to change the HW configuration to bind.
 * @param peer_info
 *   Pointer to information of the peer queue.
 * @param direction
 *   Positive to configure the TxQ, zero to configure the RxQ.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
			     struct rte_hairpin_peer_info *peer_info,
			     uint32_t direction)
{
	int ret = 0;

	/*
	 * Consistency checking of the peer queue: the opposite direction is
	 * used to get the peer queue info with the ethdev port ID, so no
	 * further check is needed.
	 */
	if (peer_info->peer_q != cur_queue) {
		rte_errno = EINVAL;
		DRV_LOG(ERR, "port %u queue %d and peer queue %d mismatch",
			dev->data->port_id, cur_queue, peer_info->peer_q);
		return -rte_errno;
	}
	if (direction != 0) {
		struct mlx5_txq_ctrl *txq_ctrl;
		struct mlx5_devx_modify_sq_attr sq_attr = { 0 };

		txq_ctrl = mlx5_txq_get(dev, cur_queue);
		if (txq_ctrl == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Txq object found: %d",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		if (txq_ctrl->hairpin_status != 0) {
			DRV_LOG(DEBUG, "port %u Tx queue %d is already bound",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return 0;
		}
		/*
		 * Consistency checking of all queues of one port is done in
		 * the bind() function, and that is optional.
		 */
		if (peer_info->tx_explicit !=
		    txq_ctrl->hairpin_conf.tx_explicit) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u Tx queue %d and peer Tx rule mode"
				" mismatch", dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		if (peer_info->manual_bind !=
		    txq_ctrl->hairpin_conf.manual_bind) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u Tx queue %d and peer binding mode"
				" mismatch", dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = peer_info->qp_id;
		sq_attr.hairpin_peer_vhca = peer_info->vhca_id;
		ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr);
		if (ret == 0)
			txq_ctrl->hairpin_status = 1;
		mlx5_txq_release(dev, cur_queue);
	} else {
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
		struct mlx5_rxq_ctrl *rxq_ctrl;
		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };

		if (rxq == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		rxq_ctrl = rxq->ctrl;
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Rxq object found: %d",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		if (rxq->hairpin_status != 0) {
			DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
				dev->data->port_id, cur_queue);
			return 0;
		}
		if (peer_info->tx_explicit !=
		    rxq->hairpin_conf.tx_explicit) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
				" mismatch", dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		if (peer_info->manual_bind !=
		    rxq->hairpin_conf.manual_bind) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
				" mismatch", dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = peer_info->qp_id;
		rq_attr.hairpin_peer_vhca = peer_info->vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
		if (ret == 0)
			rxq->hairpin_status = 1;
	}
	return ret;
}
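/*
 * Idempotency: hairpin_status is the bound/unbound flag, so a second bind
 * (or unbind) of the same queue returns success without issuing another
 * firmware command. This is what lets the port-level bind be retried safely
 * after a partial failure.
 */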
/**
 * Unbind the hairpin queue and reset its HW configuration.
 * This needs to be called twice, both for the Tx and Rx queues of a pair.
 * If the queue is already unbound, it is considered successful.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param cur_queue
 *   Index of the queue to change the HW configuration to unbind.
 * @param direction
 *   Positive to reset the TxQ, zero to reset the RxQ.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
			       uint32_t direction)
{
	int ret = 0;

	if (direction != 0) {
		struct mlx5_txq_ctrl *txq_ctrl;
		struct mlx5_devx_modify_sq_attr sq_attr = { 0 };

		txq_ctrl = mlx5_txq_get(dev, cur_queue);
		if (txq_ctrl == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		/* Already unbound, return success before obj checking. */
		if (txq_ctrl->hairpin_status == 0) {
			DRV_LOG(DEBUG, "port %u Tx queue %d is already unbound",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return 0;
		}
		if (!txq_ctrl->obj || !txq_ctrl->obj->sq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Txq object found: %d",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		sq_attr.state = MLX5_SQC_STATE_RST;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr);
		if (ret == 0)
			txq_ctrl->hairpin_status = 0;
		mlx5_txq_release(dev, cur_queue);
	} else {
		struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, cur_queue);
		struct mlx5_rxq_ctrl *rxq_ctrl;
		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };

		if (rxq == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		rxq_ctrl = rxq->ctrl;
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		if (rxq->hairpin_status == 0) {
			DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
				dev->data->port_id, cur_queue);
			return 0;
		}
		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Rxq object found: %d",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		rq_attr.state = MLX5_SQC_STATE_RST;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
		if (ret == 0)
			rxq->hairpin_status = 0;
	}
	return ret;
}
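/*
 * The unbind path deliberately checks hairpin_status before validating the
 * queue object: a queue that was never bound (or is already unbound) must
 * report success even if its HW object is gone, per the contract stated in
 * the function comment above.
 */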
/**
 * Bind the hairpin port pairs, from the Tx to the peer Rx.
 * This function only supports binding the Tx to one Rx.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_port
 *   Port identifier of the Rx port.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_txq_ctrl *txq_ctrl;
	uint32_t i;
	struct rte_hairpin_peer_info peer = {0xffffff};
	struct rte_hairpin_peer_info cur;
	const struct rte_eth_hairpin_conf *conf;
	uint16_t num_q = 0;
	uint16_t local_port = priv->dev_data->port_id;
	uint32_t manual;
	uint32_t explicit;
	uint16_t rx_queue;

	if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
		rte_errno = ENODEV;
		DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
		return -rte_errno;
	}
	/*
	 * Before binding a TxQ to the peer RxQ, the first-round loop checks
	 * the queues' configuration consistency. This is a little time
	 * consuming, but better than having to do a rollback.
	 */
	for (i = 0; i != priv->txqs_n; i++) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (txq_ctrl == NULL)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		/*
		 * All hairpin Tx queues of a single port connected to the
		 * same peer Rx port should have the same "auto binding" and
		 * "implicit Tx flow" modes.
		 * Peer consistency checking will be done in per-queue binding.
		 */
		conf = &txq_ctrl->hairpin_conf;
		if (conf->peers[0].port == rx_port) {
			if (num_q == 0) {
				manual = conf->manual_bind;
				explicit = conf->tx_explicit;
			} else {
				if (manual != conf->manual_bind ||
				    explicit != conf->tx_explicit) {
					rte_errno = EINVAL;
					DRV_LOG(ERR, "port %u queue %d mode"
						" mismatch: %u %u, %u %u",
						local_port, i, manual,
						conf->manual_bind, explicit,
						conf->tx_explicit);
					mlx5_txq_release(dev, i);
					return -rte_errno;
				}
			}
			num_q++;
		}
		mlx5_txq_release(dev, i);
	}
	/* Once no queue is configured, success is returned directly. */
	if (num_q == 0)
		return ret;
	/* All the hairpin Tx queues need to be traversed again. */
	for (i = 0; i != priv->txqs_n; i++) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (txq_ctrl == NULL)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (txq_ctrl->hairpin_conf.peers[0].port != rx_port) {
			mlx5_txq_release(dev, i);
			continue;
		}
		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
		/*
		 * Fetch peer RxQ's information.
		 * No need to pass the information of the current queue.
		 */
		ret = rte_eth_hairpin_queue_peer_update(rx_port, rx_queue,
							NULL, &peer, 1);
		if (ret != 0) {
			mlx5_txq_release(dev, i);
			goto error;
		}
		/* Accessing its own device, inside mlx5 PMD. */
		ret = mlx5_hairpin_queue_peer_bind(dev, i, &peer, 1);
		if (ret != 0) {
			mlx5_txq_release(dev, i);
			goto error;
		}
		/* Pass TxQ's information to peer RxQ and try binding. */
		cur.peer_q = rx_queue;
		cur.qp_id = txq_ctrl->obj->sq->id;
		cur.vhca_id = priv->config.hca_attr.vhca_id;
		cur.tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
		cur.manual_bind = txq_ctrl->hairpin_conf.manual_bind;
		/*
		 * In order to access another device in a proper way, an RTE
		 * level private function is needed.
		 */
		ret = rte_eth_hairpin_queue_peer_bind(rx_port, rx_queue,
						      &cur, 0);
		if (ret != 0) {
			mlx5_txq_release(dev, i);
			goto error;
		}
		mlx5_txq_release(dev, i);
	}
	return 0;
error:
	/*
	 * Do the roll-back process for the queues already bound.
	 * No need to check the return value of the queue unbind function.
	 */
	do {
		/* No validation is needed here. */
		txq_ctrl = mlx5_txq_get(dev, i);
		if (txq_ctrl == NULL)
			continue;
		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
		rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0);
		mlx5_hairpin_queue_peer_unbind(dev, i, 1);
		mlx5_txq_release(dev, i);
	} while (i--);
	return ret;
}
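/*
 * Application-level usage sketch (illustrative only; the port numbers are
 * made up): after both ports are started with manual-bind hairpin queues,
 * the pair is bound from the Tx side via the public ethdev API, which lands
 * in mlx5_hairpin_bind_single_port() above:
 *
 *     // Tx port 0 -> Rx port 1
 *     int ret = rte_eth_hairpin_bind(0, 1);
 *     if (ret != 0)
 *             printf("hairpin bind failed: %s\n", rte_strerror(-ret));
 */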
/**
 * Unbind the hairpin port pair. The HW configuration of both devices will be
 * cleared and the status will be reset for all the queues used between them.
 * This function only supports unbinding the Tx from one Rx.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_port
 *   Port identifier of the Rx port.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;
	uint32_t i;
	int ret;
	uint16_t cur_port = priv->dev_data->port_id;

	if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
		rte_errno = ENODEV;
		DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
		return -rte_errno;
	}
	for (i = 0; i != priv->txqs_n; i++) {
		uint16_t rx_queue;

		txq_ctrl = mlx5_txq_get(dev, i);
		if (txq_ctrl == NULL)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (txq_ctrl->hairpin_conf.peers[0].port != rx_port) {
			mlx5_txq_release(dev, i);
			continue;
		}
		/* Indeed, only the first used queue needs to be checked. */
		if (txq_ctrl->hairpin_conf.manual_bind == 0) {
			if (cur_port != rx_port) {
				rte_errno = EINVAL;
				DRV_LOG(ERR, "port %u and port %u are in"
					" auto-bind mode", cur_port, rx_port);
				mlx5_txq_release(dev, i);
				return -rte_errno;
			} else {
				return 0;
			}
		}
		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
		mlx5_txq_release(dev, i);
		ret = rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0);
		if (ret) {
			DRV_LOG(ERR, "port %u Rx queue %d unbind - failure",
				rx_port, rx_queue);
			return ret;
		}
		ret = mlx5_hairpin_queue_peer_unbind(dev, i, 1);
		if (ret) {
			DRV_LOG(ERR, "port %u Tx queue %d unbind - failure",
				cur_port, i);
			return ret;
		}
	}
	return 0;
}
/**
 * Bind hairpin ports; Rx could be all ports when using RTE_MAX_ETHPORTS.
 * @see mlx5_hairpin_bind_single_port()
 */
int
mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
{
	int ret = 0;
	uint16_t p, pp;

	/*
	 * If the Rx port has no hairpin configuration with the current port,
	 * the binding will be skipped in the called function of single port.
	 * Device started status will be checked only before the queue
	 * information updating.
	 */
	if (rx_port == RTE_MAX_ETHPORTS) {
		MLX5_ETH_FOREACH_DEV(p, dev->device) {
			ret = mlx5_hairpin_bind_single_port(dev, p);
			if (ret != 0)
				goto unbind;
		}
		return ret;
	} else {
		return mlx5_hairpin_bind_single_port(dev, rx_port);
	}
unbind:
	MLX5_ETH_FOREACH_DEV(pp, dev->device)
		if (pp < p)
			mlx5_hairpin_unbind_single_port(dev, pp);
	return ret;
}
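/*
 * When rx_port == RTE_MAX_ETHPORTS, every mlx5 port sharing the same device
 * is tried in turn; on failure only the ports already bound (pp < p) are
 * unwound, so the wildcard bind is all-or-nothing from the caller's point
 * of view.
 */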
/**
 * Unbind hairpin ports; Rx could be all ports when using RTE_MAX_ETHPORTS.
 * @see mlx5_hairpin_unbind_single_port()
 */
int
mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
{
	int ret = 0;
	uint16_t p;

	if (rx_port == RTE_MAX_ETHPORTS)
		MLX5_ETH_FOREACH_DEV(p, dev->device) {
			ret = mlx5_hairpin_unbind_single_port(dev, p);
			if (ret != 0)
				return ret;
		}
	else
		ret = mlx5_hairpin_unbind_single_port(dev, rx_port);
	return ret;
}
/**
 * DPDK callback to get the hairpin peer ports list.
 * This will return the actual number of peer ports and save the identifiers
 * into the array (sorted; the order may differ from the one used when
 * setting up the hairpin peer queues).
 * The peer port ID could be the same as the port ID of the current device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param peer_ports
 *   Pointer to array to save the port identifiers.
 * @param len
 *   The length of the array.
 * @param direction
 *   Current port to peer port direction.
 *   positive - current used as Tx to get all peer Rx ports.
 *   zero - current used as Rx to get all peer Tx ports.
 *
 * @return
 *   0 or a positive value on success (the actual number of peer ports),
 *   a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
			    size_t len, uint32_t direction)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;
	uint32_t i;
	uint16_t pp;
	uint32_t bits[(RTE_MAX_ETHPORTS + 31) / 32] = {0};
	int ret = 0;

	if (direction) {
		for (i = 0; i < priv->txqs_n; i++) {
			txq_ctrl = mlx5_txq_get(dev, i);
			if (!txq_ctrl)
				continue;
			if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
				mlx5_txq_release(dev, i);
				continue;
			}
			pp = txq_ctrl->hairpin_conf.peers[0].port;
			if (pp >= RTE_MAX_ETHPORTS) {
				rte_errno = ERANGE;
				mlx5_txq_release(dev, i);
				DRV_LOG(ERR, "port %hu queue %u peer port "
					"out of range %hu",
					priv->dev_data->port_id, i, pp);
				return -rte_errno;
			}
			bits[pp / 32] |= 1 << (pp % 32);
			mlx5_txq_release(dev, i);
		}
	} else {
		for (i = 0; i < priv->rxqs_n; i++) {
			struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
			struct mlx5_rxq_ctrl *rxq_ctrl;

			if (rxq == NULL)
				continue;
			rxq_ctrl = rxq->ctrl;
			if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)
				continue;
			pp = rxq->hairpin_conf.peers[0].port;
			if (pp >= RTE_MAX_ETHPORTS) {
				rte_errno = ERANGE;
				DRV_LOG(ERR, "port %hu queue %u peer port "
					"out of range %hu",
					priv->dev_data->port_id, i, pp);
				return -rte_errno;
			}
			bits[pp / 32] |= 1 << (pp % 32);
		}
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (bits[i / 32] & (1 << (i % 32))) {
			if ((size_t)ret >= len) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			peer_ports[ret++] = i;
		}
	}
	return ret;
}
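/*
 * Caller-side sketch (illustrative): the ethdev wrapper passes an array to
 * be filled and receives the count back:
 *
 *     uint16_t peers[RTE_MAX_ETHPORTS];
 *     int n = rte_eth_hairpin_get_peer_ports(port_id, peers,
 *                                            RTE_DIM(peers), 1);
 *     // n >= 0: number of peer Rx ports; n < 0: negative errno value
 */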
/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;
	int fine_inline;

	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
	fine_inline = rte_mbuf_dynflag_lookup
		(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
	if (fine_inline >= 0)
		rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
	else
		rte_net_mlx5_dynf_inline_mask = 0;
	if (dev->data->nb_rx_queues > 0) {
		ret = mlx5_dev_configure_rss_reta(dev);
		if (ret) {
			DRV_LOG(ERR, "port %u reta config failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return -rte_errno;
		}
	}
	ret = mlx5_txpp_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	if ((priv->sh->devx && priv->config.dv_flow_en &&
	    priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
		ret = priv->obj_ops.lb_dummy_queue_create(dev);
		if (ret)
			goto error;
	}
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	if (priv->config.std_delay_drop || priv->config.hp_delay_drop) {
		if (!priv->config.vf && !priv->config.sf &&
		    !priv->representor) {
			ret = mlx5_get_flag_dropless_rq(dev);
			if (ret < 0)
				DRV_LOG(WARNING,
					"port %u cannot query dropless flag",
					dev->data->port_id);
			else if (!ret)
				DRV_LOG(WARNING,
					"port %u dropless_rq OFF, no rearming",
					dev->data->port_id);
		} else {
			DRV_LOG(DEBUG,
				"port %u doesn't support dropless_rq flag",
				dev->data->port_id);
		}
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	/*
	 * This step will be skipped if there is no hairpin Tx queue
	 * configured with an Rx peer queue from the same device.
	 */
	ret = mlx5_hairpin_auto_bind(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u hairpin auto binding failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	/* Set started flag here for the following steps like control flow. */
	dev->data->dev_started = 1;
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_os_stats_init(dev);
	/*
	 * Attach indirection table objects detached on port stop.
	 * They may be needed to create RSS in non-isolated mode.
	 */
	ret = mlx5_action_handle_attach(dev);
	if (ret) {
		DRV_LOG(ERR,
			"port %u failed to attach indirect actions: %s",
			dev->data->port_id, rte_strerror(rte_errno));
		goto error;
	}
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u failed to set default flows",
			dev->data->port_id);
		goto error;
	}
	/* Set a mask and offset of dynamic metadata flows into Rx queues. */
	mlx5_flow_rxq_dynf_metadata_set(dev);
	/* Set flags and context to convert Rx timestamps. */
	mlx5_rxq_timestamp_set(dev);
	/* Set a mask and offset of scheduling on timestamp into Tx queues. */
	mlx5_txq_dynf_timestamp_set(dev);
	/*
	 * In non-cached mode, it only needs to start the default mreg copy
	 * action and no flow created by application exists anymore.
	 * But it is worth wrapping the interface for further usage.
	 */
	ret = mlx5_flow_start_default(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	if (mlx5_dev_ctx_shared_mempool_subscribe(dev) != 0) {
		DRV_LOG(ERR, "port %u failed to subscribe for mempool life cycle: %s",
			dev->data->port_id, rte_strerror(rte_errno));
		goto error;
	}
	rte_wmb();
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	/* Enable datapath on secondary process. */
	mlx5_mp_os_req_start_rxtx(dev);
	if (rte_intr_fd_get(priv->sh->intr_handle) >= 0) {
		priv->sh->port[priv->dev_port - 1].ih_port_id =
					(uint32_t)dev->data->port_id;
	} else {
		DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
			dev->data->port_id);
		dev->data->dev_conf.intr_conf.lsc = 0;
		dev->data->dev_conf.intr_conf.rmv = 0;
	}
	if (rte_intr_fd_get(priv->sh->intr_handle_devx) >= 0)
		priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
					(uint32_t)dev->data->port_id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Rollback. */
	dev->data->dev_started = 0;
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	if (priv->obj_ops.lb_dummy_queue_release)
		priv->obj_ops.lb_dummy_queue_release(dev);
	mlx5_txpp_stop(dev); /* Stop last. */
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
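/*
 * Start-order summary for the function above: packet pacing, then Tx
 * queues, then Rx queues, then hairpin auto-bind, and only then the
 * control/default flows; dev_started is set before flow creation because
 * the flow paths check it. The error label tears the same steps down in
 * reverse, with Tx packet pacing stopped last.
 */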
/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
int
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_os_req_stop_rxtx(dev);
	rte_delay_us_sleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
	mlx5_flow_stop_default(dev);
	/* Control flows for default traffic can be removed firstly. */
	mlx5_traffic_disable(dev);
	/* All RX queue flags will be cleared in the flush interface. */
	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
	mlx5_flow_meter_rxq_flush(dev);
	mlx5_action_handle_detach(dev);
	mlx5_rx_intr_vec_disable(dev);
	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	if (priv->obj_ops.lb_dummy_queue_release)
		priv->obj_ops.lb_dummy_queue_release(dev);
	mlx5_txpp_stop(dev);
	return 0;
}
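/*
 * The rte_delay_us_sleep(1000 * rxqs_n) above is the quiescing step: the
 * burst function pointers were already swapped to the removed_* stubs, and
 * the sleep gives datapath threads that are mid-burst time to drain before
 * queue state is torn down.
 */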
/**
 * Enable traffic flows configured by the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct rte_ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/*
	 * The hairpin txq default flow should be created no matter whether
	 * it is isolation mode or not. Otherwise all the packets to be sent
	 * will be sent out directly without the Tx flow actions, e.g.
	 * encapsulation.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);

		if (!txq_ctrl)
			continue;
		/* Only Tx implicit mode requires the default Tx flow. */
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN &&
		    txq_ctrl->hairpin_conf.tx_explicit == 0 &&
		    txq_ctrl->hairpin_conf.peers[0].port ==
		    priv->dev_data->port_id) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		if ((priv->representor || priv->master) &&
		    priv->config.dv_esw_en) {
			if (mlx5_flow_create_devx_sq_miss_flow(dev, i) == 0) {
				DRV_LOG(ERR,
					"Port %u Tx queue %u SQ create representor devx default miss rule failed.",
					dev->data->port_id, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
	if ((priv->master || priv->representor) && priv->config.dv_esw_en) {
		if (mlx5_flow_create_esw_table_zero_flow(dev))
			priv->fdb_def_rule = 1;
		else
			DRV_LOG(INFO, "port %u FDB default rule cannot be"
				" configured - only Eswitch group 0 flows are"
				" supported.", dev->data->port_id);
	}
	if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
		ret = mlx5_flow_lacp_miss(dev);
		if (ret)
			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
				"forward LACP to kernel.", dev->data->port_id);
		else
			DRV_LOG(INFO, "LACP traffic will be missed in port %u.",
				dev->data->port_id);
	}
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.type = 0,
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret) {
				/* Do not fail on IPv6 broadcast creation failure. */
				DRV_LOG(WARNING,
					"IPv6 broadcast is not supported");
				ret = 0;
			}
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
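/*
 * Control-flow classes created above, in order: hairpin/SQ default Tx
 * rules, E-Switch and LACP miss rules, then (unless the port is isolated)
 * promiscuous or all-multicast catch-alls, broadcast and IPv6-multicast
 * rules per VLAN filter, and one unicast rule per configured MAC address
 * (again per VLAN when filters are set). A single failure flushes every
 * MLX5_FLOW_TYPE_CTL flow via the error path.
 */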
/**
 * Disable traffic flows configured by the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
}
/**
 * Restart traffic flows configured by the control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}