/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#include <unistd.h>

#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_interrupts.h>
#include <rte_alarm.h>
#include <rte_cycles.h>

#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_utils.h"
#include "rte_pmd_mlx5.h"
/**
 * Stop traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_txq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->txqs_n; ++i)
		mlx5_txq_release(dev, i);
}
/**
 * Start traffic on Tx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret;

	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
		struct mlx5_txq_data *txq_data = &txq_ctrl->txq;
		uint32_t flags = MLX5_MEM_RTE | MLX5_MEM_ZERO;

		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
			txq_alloc_elts(txq_ctrl);
		MLX5_ASSERT(!txq_ctrl->obj);
		txq_ctrl->obj = mlx5_malloc(flags, sizeof(struct mlx5_txq_obj),
					    0, txq_ctrl->socket);
		if (!txq_ctrl->obj) {
			DRV_LOG(ERR, "Port %u Tx queue %u cannot allocate "
				"memory resources.", dev->data->port_id,
				txq_data->idx);
			rte_errno = ENOMEM;
			goto error;
		}
		ret = priv->obj_ops.txq_obj_new(dev, i);
		if (ret < 0) {
			mlx5_free(txq_ctrl->obj);
			txq_ctrl->obj = NULL;
			goto error;
		}
		if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD) {
			size_t size = txq_data->cqe_s * sizeof(*txq_data->fcqs);

			txq_data->fcqs = mlx5_malloc(flags, size,
						     RTE_CACHE_LINE_SIZE,
						     txq_ctrl->socket);
			if (!txq_data->fcqs) {
				DRV_LOG(ERR, "Port %u Tx queue %u cannot "
					"allocate memory (FCQ).",
					dev->data->port_id, i);
				rte_errno = ENOMEM;
				goto error;
			}
		}
		DRV_LOG(DEBUG, "Port %u txq %u updated with %p.",
			dev->data->port_id, i, (void *)&txq_ctrl->obj);
		LIST_INSERT_HEAD(&priv->txqsobj, txq_ctrl->obj, next);
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_txq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
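/*
 * Note: the error path above deliberately saves and restores rte_errno
 * around the cleanup calls, which may themselves overwrite it. A minimal
 * sketch of the same idiom (illustrative only; cleanup_queues() is a
 * hypothetical placeholder):
 *
 * @code
 *	int ret = rte_errno;   // Save the failure reason first.
 *	cleanup_queues(dev);   // Cleanup may clobber rte_errno.
 *	rte_errno = ret;       // Restore it for the caller.
 *	return -rte_errno;     // Negative errno return convention.
 * @endcode
 */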
/**
 * Stop traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mlx5_rxq_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i)
		mlx5_rxq_release(dev, i);
}
/**
 * Start traffic on Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;
	int ret = 0;

	/* Allocate/reuse/resize mempool for Multi-Packet RQ. */
	if (mlx5_mprq_alloc_mp(dev)) {
		/* Should not release Rx queues but return immediately. */
		return -rte_errno;
	}
	DRV_LOG(DEBUG, "Port %u device_attr.max_qp_wr is %d.",
		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
	DRV_LOG(DEBUG, "Port %u device_attr.max_sge is %d.",
		dev->data->port_id, priv->sh->device_attr.max_sge);
	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);

		if (!rxq_ctrl)
			continue;
		if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
			/* Pre-register Rx mempools. */
			if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq)) {
				mlx5_mr_update_mp(dev, &rxq_ctrl->rxq.mr_ctrl,
						  rxq_ctrl->rxq.mprq_mp);
			} else {
				uint32_t s;

				for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++)
					mlx5_mr_update_mp
						(dev, &rxq_ctrl->rxq.mr_ctrl,
						 rxq_ctrl->rxq.rxseg[s].mp);
			}
			ret = rxq_alloc_elts(rxq_ctrl);
			if (ret)
				goto error;
		}
		MLX5_ASSERT(!rxq_ctrl->obj);
		rxq_ctrl->obj = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
					    sizeof(*rxq_ctrl->obj), 0,
					    rxq_ctrl->socket);
		if (!rxq_ctrl->obj) {
			DRV_LOG(ERR,
				"Port %u Rx queue %u can't allocate resources.",
				dev->data->port_id, (*priv->rxqs)[i]->idx);
			rte_errno = ENOMEM;
			goto error;
		}
		ret = priv->obj_ops.rxq_obj_new(dev, i);
		if (ret) {
			mlx5_free(rxq_ctrl->obj);
			rxq_ctrl->obj = NULL;
			goto error;
		}
		DRV_LOG(DEBUG, "Port %u rxq %u updated with %p.",
			dev->data->port_id, i, (void *)&rxq_ctrl->obj);
		LIST_INSERT_HEAD(&priv->rxqsobj, rxq_ctrl->obj, next);
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	do {
		mlx5_rxq_release(dev, i);
	} while (i-- != 0);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
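/*
 * Note: the Multi-Packet RQ mempool above is allocated (or reused/resized)
 * once per device by mlx5_mprq_alloc_mp(). A hedged usage sketch: MPRQ is
 * typically enabled through devargs at probe time, e.g. (the PCI address
 * below is illustrative):
 *
 * @code
 *	// dpdk-testpmd -a 0000:03:00.0,mprq_en=1 -- -i
 * @endcode
 */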
/**
 * Binds Tx queues to Rx queues for hairpin.
 *
 * Binds Tx queues to the target Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_auto_bind(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_modify_sq_attr sq_attr = { 0 };
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_devx_obj *sq;
	struct mlx5_devx_obj *rq;
	unsigned int i;
	int ret = 0;
	bool need_auto = false;
	uint16_t self_port = dev->data->port_id;

	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
		    txq_ctrl->hairpin_conf.peers[0].port != self_port) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (txq_ctrl->hairpin_conf.manual_bind) {
			mlx5_txq_release(dev, i);
			return 0;
		}
		need_auto = true;
		mlx5_txq_release(dev, i);
	}
	if (!need_auto)
		return 0;
	for (i = 0; i != priv->txqs_n; ++i) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		/* Skip hairpin queues with other peer ports. */
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN ||
		    txq_ctrl->hairpin_conf.peers[0].port != self_port) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (!txq_ctrl->obj) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no txq object found: %d",
				dev->data->port_id, i);
			mlx5_txq_release(dev, i);
			return -rte_errno;
		}
		sq = txq_ctrl->obj->sq;
		rxq_ctrl = mlx5_rxq_get(dev,
					txq_ctrl->hairpin_conf.peers[0].queue);
		if (!rxq_ctrl) {
			mlx5_txq_release(dev, i);
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u no rxq object found: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
		    rxq_ctrl->hairpin_conf.peers[0].queue != i) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u Tx queue %d cannot be bound to "
				"Rx queue %d", dev->data->port_id,
				i, txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		rq = rxq_ctrl->obj->rq;
		if (!rq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u hairpin no matching rxq: %d",
				dev->data->port_id,
				txq_ctrl->hairpin_conf.peers[0].queue);
			goto error;
		}
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = rq->id;
		sq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_sq(sq, &sq_attr);
		if (ret)
			goto error;
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = sq->id;
		rq_attr.hairpin_peer_vhca = priv->config.hca_attr.vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rq, &rq_attr);
		if (ret)
			goto error;
		/* Queues with auto-bind will be destroyed directly. */
		rxq_ctrl->hairpin_status = 1;
		txq_ctrl->hairpin_status = 1;
		mlx5_txq_release(dev, i);
		mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	}
	return 0;
error:
	mlx5_txq_release(dev, i);
	mlx5_rxq_release(dev, txq_ctrl->hairpin_conf.peers[0].queue);
	return -rte_errno;
}
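/*
 * Usage sketch (illustrative, application side): the auto binding above is
 * triggered by rte_eth_dev_start() when a hairpin Tx queue peers with an
 * Rx queue of the same port and manual_bind is not requested:
 *
 * @code
 *	struct rte_eth_hairpin_conf conf = {
 *		.peer_count = 1,
 *		.peers[0] = { .port = port_id, .queue = rxq_id },
 *		.manual_bind = 0,	// let the PMD auto-bind on start
 *		.tx_explicit = 0,	// implicit Tx flow mode
 *	};
 *	ret = rte_eth_tx_hairpin_queue_setup(port_id, txq_id, nb_desc, &conf);
 * @endcode
 */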
/**
 * Fetch the peer queue's SW & HW information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param peer_queue
 *   Index of the queue to fetch the information.
 * @param current_info
 *   Pointer to the input peer information, not used currently.
 * @param peer_info
 *   Pointer to the structure to store the information, output.
 * @param direction
 *   Positive to get the RxQ information, zero to get the TxQ information.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hairpin_queue_peer_update(struct rte_eth_dev *dev, uint16_t peer_queue,
			       struct rte_hairpin_peer_info *current_info,
			       struct rte_hairpin_peer_info *peer_info,
			       uint32_t direction)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	RTE_SET_USED(current_info);

	if (dev->data->dev_started == 0) {
		rte_errno = EBUSY;
		DRV_LOG(ERR, "peer port %u is not started",
			dev->data->port_id);
		return -rte_errno;
	}
	/*
	 * Peer port used as egress. In the current design, a hairpin Tx
	 * queue is bound to the peer Rx queue, so in practice only the peer
	 * Rx queue information ever needs to be fetched.
	 */
	if (direction == 0) {
		struct mlx5_txq_ctrl *txq_ctrl;

		txq_ctrl = mlx5_txq_get(dev, peer_queue);
		if (txq_ctrl == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
				dev->data->port_id, peer_queue);
			return -rte_errno;
		}
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d is not a hairpin Txq",
				dev->data->port_id, peer_queue);
			mlx5_txq_release(dev, peer_queue);
			return -rte_errno;
		}
		if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Txq object found: %d",
				dev->data->port_id, peer_queue);
			mlx5_txq_release(dev, peer_queue);
			return -rte_errno;
		}
		peer_info->qp_id = txq_ctrl->obj->sq->id;
		peer_info->vhca_id = priv->config.hca_attr.vhca_id;
		/* 1-to-1 mapping, only the first one is used. */
		peer_info->peer_q = txq_ctrl->hairpin_conf.peers[0].queue;
		peer_info->tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
		peer_info->manual_bind = txq_ctrl->hairpin_conf.manual_bind;
		mlx5_txq_release(dev, peer_queue);
	} else { /* Peer port used as ingress. */
		struct mlx5_rxq_ctrl *rxq_ctrl;

		rxq_ctrl = mlx5_rxq_get(dev, peer_queue);
		if (rxq_ctrl == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
				dev->data->port_id, peer_queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d is not a hairpin Rxq",
				dev->data->port_id, peer_queue);
			mlx5_rxq_release(dev, peer_queue);
			return -rte_errno;
		}
		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Rxq object found: %d",
				dev->data->port_id, peer_queue);
			mlx5_rxq_release(dev, peer_queue);
			return -rte_errno;
		}
		peer_info->qp_id = rxq_ctrl->obj->rq->id;
		peer_info->vhca_id = priv->config.hca_attr.vhca_id;
		peer_info->peer_q = rxq_ctrl->hairpin_conf.peers[0].queue;
		peer_info->tx_explicit = rxq_ctrl->hairpin_conf.tx_explicit;
		peer_info->manual_bind = rxq_ctrl->hairpin_conf.manual_bind;
		mlx5_rxq_release(dev, peer_queue);
	}
	return 0;
}
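/*
 * Driver-level handshake sketch (illustrative): for a two-port hairpin,
 * the Tx side first fetches the peer Rx queue information with the
 * function above and then programs both ends, roughly:
 *
 * @code
 *	struct rte_hairpin_peer_info peer;
 *
 *	// direction 1: fetch the RxQ information from the peer port.
 *	rte_eth_hairpin_queue_peer_update(rx_port, rx_queue, NULL, &peer, 1);
 *	// direction 1: configure the local TxQ with the peer info.
 *	mlx5_hairpin_queue_peer_bind(dev, tx_queue, &peer, 1);
 *	// The mirror steps run with direction 0 for the Rx side.
 * @endcode
 */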
/**
 * Bind the hairpin queue with the peer HW information.
 * This needs to be called twice, once for the Tx and once for the Rx queue
 * of a pair.
 * If the queue is already bound, it is considered successful.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param cur_queue
 *   Index of the queue to change the HW configuration to bind.
 * @param peer_info
 *   Pointer to information of the peer queue.
 * @param direction
 *   Positive to configure the TxQ, zero to configure the RxQ.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hairpin_queue_peer_bind(struct rte_eth_dev *dev, uint16_t cur_queue,
			     struct rte_hairpin_peer_info *peer_info,
			     uint32_t direction)
{
	int ret = 0;

	/*
	 * Consistency check of the peer queue: the opposite direction was
	 * already used to fetch the peer queue info by ethdev port ID, so
	 * the direction itself does not need to be checked here.
	 */
	if (peer_info->peer_q != cur_queue) {
		rte_errno = EINVAL;
		DRV_LOG(ERR, "port %u queue %d and peer queue %d mismatch",
			dev->data->port_id, cur_queue, peer_info->peer_q);
		return -rte_errno;
	}
	if (direction != 0) {
		struct mlx5_txq_ctrl *txq_ctrl;
		struct mlx5_devx_modify_sq_attr sq_attr = { 0 };

		txq_ctrl = mlx5_txq_get(dev, cur_queue);
		if (txq_ctrl == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		if (txq_ctrl->obj == NULL || txq_ctrl->obj->sq == NULL) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Txq object found: %d",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		if (txq_ctrl->hairpin_status != 0) {
			DRV_LOG(DEBUG, "port %u Tx queue %d is already bound",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return 0;
		}
		/*
		 * Consistency checking of all the queues of one port is done
		 * in the bind() function, and it is optional.
		 */
		if (peer_info->tx_explicit !=
		    txq_ctrl->hairpin_conf.tx_explicit) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u Tx queue %d and peer Tx rule mode"
				" mismatch", dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		if (peer_info->manual_bind !=
		    txq_ctrl->hairpin_conf.manual_bind) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u Tx queue %d and peer binding mode"
				" mismatch", dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		sq_attr.state = MLX5_SQC_STATE_RDY;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		sq_attr.hairpin_peer_rq = peer_info->qp_id;
		sq_attr.hairpin_peer_vhca = peer_info->vhca_id;
		ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr);
		if (ret == 0)
			txq_ctrl->hairpin_status = 1;
		mlx5_txq_release(dev, cur_queue);
	} else {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };

		rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
		if (rxq_ctrl == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
				dev->data->port_id, cur_queue);
			mlx5_rxq_release(dev, cur_queue);
			return -rte_errno;
		}
		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Rxq object found: %d",
				dev->data->port_id, cur_queue);
			mlx5_rxq_release(dev, cur_queue);
			return -rte_errno;
		}
		if (rxq_ctrl->hairpin_status != 0) {
			DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
				dev->data->port_id, cur_queue);
			mlx5_rxq_release(dev, cur_queue);
			return 0;
		}
		if (peer_info->tx_explicit !=
		    rxq_ctrl->hairpin_conf.tx_explicit) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
				" mismatch", dev->data->port_id, cur_queue);
			mlx5_rxq_release(dev, cur_queue);
			return -rte_errno;
		}
		if (peer_info->manual_bind !=
		    rxq_ctrl->hairpin_conf.manual_bind) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
				" mismatch", dev->data->port_id, cur_queue);
			mlx5_rxq_release(dev, cur_queue);
			return -rte_errno;
		}
		rq_attr.state = MLX5_SQC_STATE_RDY;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		rq_attr.hairpin_peer_sq = peer_info->qp_id;
		rq_attr.hairpin_peer_vhca = peer_info->vhca_id;
		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
		if (ret == 0)
			rxq_ctrl->hairpin_status = 1;
		mlx5_rxq_release(dev, cur_queue);
	}
	return ret;
}
/**
 * Unbind the hairpin queue and reset its HW configuration.
 * This needs to be called twice, once for the Tx and once for the Rx queue
 * of a pair.
 * If the queue is already unbound, it is considered successful.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param cur_queue
 *   Index of the queue to change the HW configuration to unbind.
 * @param direction
 *   Positive to reset the TxQ, zero to reset the RxQ.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hairpin_queue_peer_unbind(struct rte_eth_dev *dev, uint16_t cur_queue,
			       uint32_t direction)
{
	int ret = 0;

	if (direction != 0) {
		struct mlx5_txq_ctrl *txq_ctrl;
		struct mlx5_devx_modify_sq_attr sq_attr = { 0 };

		txq_ctrl = mlx5_txq_get(dev, cur_queue);
		if (txq_ctrl == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Tx queue %d",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d not a hairpin Txq",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		/* Already unbound, return success before obj checking. */
		if (txq_ctrl->hairpin_status == 0) {
			DRV_LOG(DEBUG, "port %u Tx queue %d is already unbound",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return 0;
		}
		if (!txq_ctrl->obj || !txq_ctrl->obj->sq) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Txq object found: %d",
				dev->data->port_id, cur_queue);
			mlx5_txq_release(dev, cur_queue);
			return -rte_errno;
		}
		sq_attr.state = MLX5_SQC_STATE_RST;
		sq_attr.sq_state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(txq_ctrl->obj->sq, &sq_attr);
		if (ret == 0)
			txq_ctrl->hairpin_status = 0;
		mlx5_txq_release(dev, cur_queue);
	} else {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		struct mlx5_devx_modify_rq_attr rq_attr = { 0 };

		rxq_ctrl = mlx5_rxq_get(dev, cur_queue);
		if (rxq_ctrl == NULL) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "Failed to get port %u Rx queue %d",
				dev->data->port_id, cur_queue);
			return -rte_errno;
		}
		if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
			rte_errno = EINVAL;
			DRV_LOG(ERR, "port %u queue %d not a hairpin Rxq",
				dev->data->port_id, cur_queue);
			mlx5_rxq_release(dev, cur_queue);
			return -rte_errno;
		}
		if (rxq_ctrl->hairpin_status == 0) {
			DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
				dev->data->port_id, cur_queue);
			mlx5_rxq_release(dev, cur_queue);
			return 0;
		}
		if (rxq_ctrl->obj == NULL || rxq_ctrl->obj->rq == NULL) {
			rte_errno = ENOMEM;
			DRV_LOG(ERR, "port %u no Rxq object found: %d",
				dev->data->port_id, cur_queue);
			mlx5_rxq_release(dev, cur_queue);
			return -rte_errno;
		}
		rq_attr.state = MLX5_SQC_STATE_RST;
		rq_attr.rq_state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
		if (ret == 0)
			rxq_ctrl->hairpin_status = 0;
		mlx5_rxq_release(dev, cur_queue);
	}
	return ret;
}
/**
 * Bind the hairpin port pairs, from the Tx to the peer Rx.
 * This function only supports binding the Tx side to one Rx port.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_port
 *   Port identifier of the Rx port.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_bind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret = 0;
	struct mlx5_txq_ctrl *txq_ctrl;
	uint32_t i;
	struct rte_hairpin_peer_info peer = {0xffffff};
	struct rte_hairpin_peer_info cur;
	const struct rte_eth_hairpin_conf *conf;
	uint16_t num_q = 0;
	uint16_t local_port = priv->dev_data->port_id;
	uint32_t manual;
	uint32_t explicit;
	uint16_t rx_queue;

	if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
		rte_errno = ENODEV;
		DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
		return -rte_errno;
	}
	/*
	 * Before binding a TxQ to its peer RxQ, a first pass over the queues
	 * checks their configuration consistency. This costs a little time
	 * but is better than having to roll back afterwards.
	 */
	for (i = 0; i != priv->txqs_n; i++) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (txq_ctrl == NULL)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		/*
		 * All hairpin Tx queues of a single port connected to the
		 * same peer Rx port should have the same "auto binding" and
		 * "implicit Tx flow" modes.
		 * Peer consistency checking will be done in per-queue binding.
		 */
		conf = &txq_ctrl->hairpin_conf;
		if (conf->peers[0].port == rx_port) {
			if (num_q == 0) {
				manual = conf->manual_bind;
				explicit = conf->tx_explicit;
			} else {
				if (manual != conf->manual_bind ||
				    explicit != conf->tx_explicit) {
					rte_errno = EINVAL;
					DRV_LOG(ERR, "port %u queue %d mode"
						" mismatch: %u %u, %u %u",
						local_port, i, manual,
						conf->manual_bind, explicit,
						conf->tx_explicit);
					mlx5_txq_release(dev, i);
					return -rte_errno;
				}
			}
			num_q++;
		}
		mlx5_txq_release(dev, i);
	}
	/* If no queue is configured, success is returned directly. */
	if (num_q == 0)
		return ret;
	/* All the hairpin Tx queues need to be traversed again. */
	for (i = 0; i != priv->txqs_n; i++) {
		txq_ctrl = mlx5_txq_get(dev, i);
		if (txq_ctrl == NULL)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (txq_ctrl->hairpin_conf.peers[0].port != rx_port) {
			mlx5_txq_release(dev, i);
			continue;
		}
		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
		/*
		 * Fetch the peer RxQ's information.
		 * No need to pass the information of the current queue.
		 */
		ret = rte_eth_hairpin_queue_peer_update(rx_port, rx_queue,
							NULL, &peer, 1);
		if (ret != 0) {
			mlx5_txq_release(dev, i);
			goto error;
		}
		/* Accessing its own device, inside mlx5 PMD. */
		ret = mlx5_hairpin_queue_peer_bind(dev, i, &peer, 1);
		if (ret != 0) {
			mlx5_txq_release(dev, i);
			goto error;
		}
		/* Pass TxQ's information to the peer RxQ and try binding. */
		cur.peer_q = rx_queue;
		cur.qp_id = txq_ctrl->obj->sq->id;
		cur.vhca_id = priv->config.hca_attr.vhca_id;
		cur.tx_explicit = txq_ctrl->hairpin_conf.tx_explicit;
		cur.manual_bind = txq_ctrl->hairpin_conf.manual_bind;
		/*
		 * To access another device properly, the RTE-level private
		 * function has to be used.
		 */
		ret = rte_eth_hairpin_queue_peer_bind(rx_port, rx_queue,
						      &cur, 0);
		if (ret != 0) {
			mlx5_txq_release(dev, i);
			goto error;
		}
		mlx5_txq_release(dev, i);
	}
	return 0;
error:
	/*
	 * Roll back the queues that were already bound.
	 * No need to check the return value of the queue unbind function.
	 */
	do {
		/* No validation is needed here. */
		txq_ctrl = mlx5_txq_get(dev, i);
		if (txq_ctrl == NULL)
			continue;
		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
		rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0);
		mlx5_hairpin_queue_peer_unbind(dev, i, 1);
		mlx5_txq_release(dev, i);
	} while (i--);
	return ret;
}
/**
 * Unbind the hairpin port pair; the HW configuration of both devices will be
 * cleared and the status reset for all the queues used between them.
 * This function only supports unbinding the Tx side from one Rx port.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_port
 *   Port identifier of the Rx port.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_hairpin_unbind_single_port(struct rte_eth_dev *dev, uint16_t rx_port)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;
	uint32_t i;
	int ret;
	uint16_t cur_port = priv->dev_data->port_id;

	if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
		rte_errno = ENODEV;
		DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
		return -rte_errno;
	}
	for (i = 0; i != priv->txqs_n; i++) {
		uint16_t rx_queue;

		txq_ctrl = mlx5_txq_get(dev, i);
		if (txq_ctrl == NULL)
			continue;
		if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
			mlx5_txq_release(dev, i);
			continue;
		}
		if (txq_ctrl->hairpin_conf.peers[0].port != rx_port) {
			mlx5_txq_release(dev, i);
			continue;
		}
		/* Indeed, only the first used queue needs to be checked. */
		if (txq_ctrl->hairpin_conf.manual_bind == 0) {
			if (cur_port != rx_port) {
				rte_errno = EINVAL;
				DRV_LOG(ERR, "port %u and port %u are in"
					" auto-bind mode", cur_port, rx_port);
				mlx5_txq_release(dev, i);
				return -rte_errno;
			}
			mlx5_txq_release(dev, i);
			return 0;
		}
		rx_queue = txq_ctrl->hairpin_conf.peers[0].queue;
		mlx5_txq_release(dev, i);
		ret = rte_eth_hairpin_queue_peer_unbind(rx_port, rx_queue, 0);
		if (ret) {
			DRV_LOG(ERR, "port %u Rx queue %d unbind - failure",
				rx_port, rx_queue);
			return ret;
		}
		ret = mlx5_hairpin_queue_peer_unbind(dev, i, 1);
		if (ret) {
			DRV_LOG(ERR, "port %u Tx queue %d unbind - failure",
				cur_port, i);
			return ret;
		}
	}
	return 0;
}
/**
 * Bind hairpin ports; Rx can be all ports when RTE_MAX_ETHPORTS is used.
 * @see mlx5_hairpin_bind_single_port()
 */
int
mlx5_hairpin_bind(struct rte_eth_dev *dev, uint16_t rx_port)
{
	int ret = 0;
	uint16_t p, pp;

	/*
	 * If the Rx port has no hairpin configuration with the current port,
	 * the binding will be skipped inside the single-port bind function.
	 * The device started status is checked only right before the queue
	 * information updating.
	 */
	if (rx_port == RTE_MAX_ETHPORTS) {
		MLX5_ETH_FOREACH_DEV(p, dev->device) {
			ret = mlx5_hairpin_bind_single_port(dev, p);
			if (ret != 0)
				goto unbind;
		}
		return ret;
	} else {
		return mlx5_hairpin_bind_single_port(dev, rx_port);
	}
unbind:
	MLX5_ETH_FOREACH_DEV(pp, dev->device)
		if (pp < p)
			mlx5_hairpin_unbind_single_port(dev, pp);
	return ret;
}
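/*
 * Usage sketch (illustrative, application side): with manual_bind set in
 * the hairpin configuration, the application drives the binding explicitly
 * after starting both ports:
 *
 * @code
 *	ret = rte_eth_hairpin_bind(tx_port, rx_port);
 *	if (ret != 0)
 *		printf("hairpin bind %u->%u failed: %d\n",
 *		       tx_port, rx_port, ret);
 *	// Passing RTE_MAX_ETHPORTS as rx_port binds all peer Rx ports.
 * @endcode
 */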
/**
 * Unbind hairpin ports; Rx can be all ports when RTE_MAX_ETHPORTS is used.
 * @see mlx5_hairpin_unbind_single_port()
 */
int
mlx5_hairpin_unbind(struct rte_eth_dev *dev, uint16_t rx_port)
{
	int ret = 0;
	uint16_t p;

	if (rx_port == RTE_MAX_ETHPORTS)
		MLX5_ETH_FOREACH_DEV(p, dev->device) {
			ret = mlx5_hairpin_unbind_single_port(dev, p);
			if (ret != 0)
				return ret;
		}
	else
		ret = mlx5_hairpin_unbind_single_port(dev, rx_port);
	return ret;
}
/**
 * DPDK callback to get the hairpin peer ports list.
 * This will return the actual number of peer ports and save the identifiers
 * into the array (sorted; the order may differ from the one used when
 * setting up the hairpin peer queues).
 * The peer port ID could be the same as the port ID of the current device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param peer_ports
 *   Pointer to array to save the port identifiers.
 * @param len
 *   The length of the array.
 * @param direction
 *   Current port to peer port direction.
 *   positive - current used as Tx to get all peer Rx ports.
 *   zero - current used as Rx to get all peer Tx ports.
 *
 * @return
 *   0 or a positive value on success (the actual number of peer ports);
 *   a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_hairpin_get_peer_ports(struct rte_eth_dev *dev, uint16_t *peer_ports,
			    size_t len, uint32_t direction)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_ctrl *txq_ctrl;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	uint32_t i;
	uint16_t pp;
	uint32_t bits[(RTE_MAX_ETHPORTS + 31) / 32] = {0};
	int ret = 0;

	if (direction) {
		for (i = 0; i < priv->txqs_n; i++) {
			txq_ctrl = mlx5_txq_get(dev, i);
			if (!txq_ctrl)
				continue;
			if (txq_ctrl->type != MLX5_TXQ_TYPE_HAIRPIN) {
				mlx5_txq_release(dev, i);
				continue;
			}
			pp = txq_ctrl->hairpin_conf.peers[0].port;
			if (pp >= RTE_MAX_ETHPORTS) {
				rte_errno = ERANGE;
				mlx5_txq_release(dev, i);
				DRV_LOG(ERR, "port %hu queue %u peer port "
					"out of range %hu",
					priv->dev_data->port_id, i, pp);
				return -rte_errno;
			}
			bits[pp / 32] |= 1 << (pp % 32);
			mlx5_txq_release(dev, i);
		}
	} else {
		for (i = 0; i < priv->rxqs_n; i++) {
			rxq_ctrl = mlx5_rxq_get(dev, i);
			if (!rxq_ctrl)
				continue;
			if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN) {
				mlx5_rxq_release(dev, i);
				continue;
			}
			pp = rxq_ctrl->hairpin_conf.peers[0].port;
			if (pp >= RTE_MAX_ETHPORTS) {
				rte_errno = ERANGE;
				mlx5_rxq_release(dev, i);
				DRV_LOG(ERR, "port %hu queue %u peer port "
					"out of range %hu",
					priv->dev_data->port_id, i, pp);
				return -rte_errno;
			}
			bits[pp / 32] |= 1 << (pp % 32);
			mlx5_rxq_release(dev, i);
		}
	}
	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		if (bits[i / 32] & (1 << (i % 32))) {
			if ((size_t)ret >= len) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			peer_ports[ret++] = i;
		}
	}
	return ret;
}
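/*
 * Usage sketch (illustrative, application side): the peer port list can be
 * queried to unbind everything before stopping a port, e.g. on the Tx side:
 *
 * @code
 *	uint16_t peers[RTE_MAX_ETHPORTS];
 *	int i, n;
 *
 *	n = rte_eth_hairpin_get_peer_ports(tx_port, peers,
 *					   RTE_MAX_ETHPORTS, 1);
 *	for (i = 0; i < n; i++)
 *		rte_eth_hairpin_unbind(tx_port, peers[i]);
 * @endcode
 */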
/**
 * DPDK callback to start the device.
 *
 * Simulate device start by attaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_dev_start(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int ret;
	int fine_inline;

	DRV_LOG(DEBUG, "port %u starting device", dev->data->port_id);
	fine_inline = rte_mbuf_dynflag_lookup
		(RTE_PMD_MLX5_FINE_GRANULARITY_INLINE, NULL);
	if (fine_inline >= 0)
		rte_net_mlx5_dynf_inline_mask = 1UL << fine_inline;
	else
		rte_net_mlx5_dynf_inline_mask = 0;
	if (dev->data->nb_rx_queues > 0) {
		ret = mlx5_dev_configure_rss_reta(dev);
		if (ret) {
			DRV_LOG(ERR, "port %u reta config failed: %s",
				dev->data->port_id, strerror(rte_errno));
			return -rte_errno;
		}
	}
	ret = mlx5_txpp_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx packet pacing init failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	if ((priv->config.devx && priv->config.dv_flow_en &&
	     priv->config.dest_tir) && priv->obj_ops.lb_dummy_queue_create) {
		ret = priv->obj_ops.lb_dummy_queue_create(dev);
		if (ret)
			goto error;
	}
	ret = mlx5_txq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Tx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	ret = mlx5_rxq_start(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx queue allocation failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	/*
	 * This step is skipped if there is no hairpin Tx queue configured
	 * with an Rx peer queue on the same device.
	 */
	ret = mlx5_hairpin_auto_bind(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u hairpin auto binding failed: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	/* Set started flag here for the following steps like control flow. */
	dev->data->dev_started = 1;
	ret = mlx5_rx_intr_vec_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u Rx interrupt vector creation failed",
			dev->data->port_id);
		goto error;
	}
	mlx5_os_stats_init(dev);
	ret = mlx5_traffic_enable(dev);
	if (ret) {
		DRV_LOG(ERR, "port %u failed to set default flows",
			dev->data->port_id);
		goto error;
	}
	/* Set a mask and offset of dynamic metadata flows into Rx queues. */
	mlx5_flow_rxq_dynf_metadata_set(dev);
	/* Set flags and context to convert Rx timestamps. */
	mlx5_rxq_timestamp_set(dev);
	/* Set a mask and offset of scheduling on timestamp into Tx queues. */
	mlx5_txq_dynf_timestamp_set(dev);
	/*
	 * In non-cached mode, only the default mreg copy action needs to be
	 * started, since no application-created flows exist anymore. It is
	 * still worth wrapping the interface for further usage.
	 */
	ret = mlx5_flow_start_default(dev);
	if (ret) {
		DRV_LOG(DEBUG, "port %u failed to start default actions: %s",
			dev->data->port_id, strerror(rte_errno));
		goto error;
	}
	rte_wmb();
	dev->tx_pkt_burst = mlx5_select_tx_function(dev);
	dev->rx_pkt_burst = mlx5_select_rx_function(dev);
	/* Enable datapath on secondary process. */
	mlx5_mp_os_req_start_rxtx(dev);
	if (priv->sh->intr_handle.fd >= 0) {
		priv->sh->port[priv->dev_port - 1].ih_port_id =
					(uint32_t)dev->data->port_id;
	} else {
		DRV_LOG(INFO, "port %u starts without LSC and RMV interrupts.",
			dev->data->port_id);
		dev->data->dev_conf.intr_conf.lsc = 0;
		dev->data->dev_conf.intr_conf.rmv = 0;
	}
	if (priv->sh->intr_handle_devx.fd >= 0)
		priv->sh->port[priv->dev_port - 1].devx_ih_port_id =
					(uint32_t)dev->data->port_id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	/* Rollback. */
	dev->data->dev_started = 0;
	mlx5_flow_stop_default(dev);
	mlx5_traffic_disable(dev);
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	if (priv->obj_ops.lb_dummy_queue_release)
		priv->obj_ops.lb_dummy_queue_release(dev);
	mlx5_txpp_stop(dev); /* Stop last. */
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
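/*
 * Usage sketch (illustrative): this callback is reached through the generic
 * ethdev API; the negative errno convention above is what the application
 * observes:
 *
 * @code
 *	int ret = rte_eth_dev_start(port_id);
 *
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "port %u start failed: %s\n",
 *			 port_id, rte_strerror(-ret));
 * @endcode
 */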
/**
 * DPDK callback to stop the device.
 *
 * Simulate device stop by detaching all configured flows.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
int
mlx5_dev_stop(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	dev->data->dev_started = 0;
	/* Prevent crashes when queues are still in use. */
	dev->rx_pkt_burst = removed_rx_burst;
	dev->tx_pkt_burst = removed_tx_burst;
	rte_wmb();
	/* Disable datapath on secondary process. */
	mlx5_mp_os_req_stop_rxtx(dev);
	rte_delay_us_sleep(1000 * priv->rxqs_n);
	DRV_LOG(DEBUG, "port %u stopping device", dev->data->port_id);
	mlx5_flow_stop_default(dev);
	/* Control flows for default traffic can be removed first. */
	mlx5_traffic_disable(dev);
	/* All Rx queue flags will be cleared in the flush interface. */
	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true);
	mlx5_flow_meter_rxq_flush(dev);
	mlx5_rx_intr_vec_disable(dev);
	priv->sh->port[priv->dev_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
	priv->sh->port[priv->dev_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
	mlx5_txq_stop(dev);
	mlx5_rxq_stop(dev);
	if (priv->obj_ops.lb_dummy_queue_release)
		priv->obj_ops.lb_dummy_queue_release(dev);
	mlx5_txpp_stop(dev);

	return 0;
}
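/*
 * Usage sketch (illustrative): a typical application teardown order, which
 * ends up in the callback above:
 *
 * @code
 *	ret = rte_eth_dev_stop(port_id);	// quiesce the datapath first
 *	if (ret == 0)
 *		rte_eth_dev_close(port_id);	// then release the resources
 * @endcode
 */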
/**
 * Enable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_enable(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	struct rte_flow_item_eth ipv6_multi_spec = {
		.dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth ipv6_multi_mask = {
		.dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast = {
		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	struct rte_flow_item_eth unicast_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};
	const unsigned int vlan_filter_n = priv->vlan_filter_n;
	const struct rte_ether_addr cmp = {
		.addr_bytes = "\x00\x00\x00\x00\x00\x00",
	};
	unsigned int i;
	unsigned int j;
	int ret;

	/*
	 * The hairpin Txq default flow should be created no matter whether
	 * isolation mode is set. Otherwise, all the packets to be sent would
	 * go out directly without the Tx flow actions, e.g. encapsulation.
	 */
	for (i = 0; i != priv->txqs_n; ++i) {
		struct mlx5_txq_ctrl *txq_ctrl = mlx5_txq_get(dev, i);
		if (!txq_ctrl)
			continue;
		/* Only Tx implicit mode requires the default Tx flow. */
		if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN &&
		    txq_ctrl->hairpin_conf.tx_explicit == 0 &&
		    txq_ctrl->hairpin_conf.peers[0].port ==
		    priv->dev_data->port_id) {
			ret = mlx5_ctrl_flow_source_queue(dev, i);
			if (ret) {
				mlx5_txq_release(dev, i);
				goto error;
			}
		}
		mlx5_txq_release(dev, i);
	}
	if (priv->config.dv_esw_en && !priv->config.vf && !priv->config.sf) {
		if (mlx5_flow_create_esw_table_zero_flow(dev))
			priv->fdb_def_rule = 1;
		else
			DRV_LOG(INFO, "port %u FDB default rule cannot be"
				" configured - only Eswitch group 0 flows are"
				" supported.", dev->data->port_id);
	}
	if (!priv->config.lacp_by_user && priv->pf_bond >= 0) {
		ret = mlx5_flow_lacp_miss(dev);
		if (ret)
			DRV_LOG(INFO, "port %u LACP rule cannot be created - "
				"forward LACP to kernel.", dev->data->port_id);
		else
			DRV_LOG(INFO, "LACP traffic will be missed in port %u.",
				dev->data->port_id);
	}
	if (priv->isolated)
		return 0;
	if (dev->data->promiscuous) {
		struct rte_flow_item_eth promisc = {
			.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
		};

		ret = mlx5_ctrl_flow(dev, &promisc, &promisc);
		if (ret)
			goto error;
	}
	if (dev->data->all_multicast) {
		struct rte_flow_item_eth multicast = {
			.dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
			.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
		};

		ret = mlx5_ctrl_flow(dev, &multicast, &multicast);
		if (ret)
			goto error;
	} else {
		/* Add broadcast/multicast flows. */
		for (i = 0; i != vlan_filter_n; ++i) {
			uint16_t vlan = priv->vlan_filter[i];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &bcast, &bcast,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow_vlan(dev, &ipv6_multi_spec,
						  &ipv6_multi_mask,
						  &vlan_spec, &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
			if (ret)
				goto error;
			ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec,
					     &ipv6_multi_mask);
			if (ret) {
				/* Do not fail on IPv6 broadcast creation failure. */
				DRV_LOG(WARNING,
					"IPv6 broadcast is not supported");
				ret = 0;
			}
		}
	}
	/* Add MAC address flows. */
	for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
		struct rte_ether_addr *mac = &dev->data->mac_addrs[i];

		if (!memcmp(mac, &cmp, sizeof(*mac)))
			continue;
		memcpy(&unicast.dst.addr_bytes,
		       mac->addr_bytes,
		       RTE_ETHER_ADDR_LEN);
		for (j = 0; j != vlan_filter_n; ++j) {
			uint16_t vlan = priv->vlan_filter[j];

			struct rte_flow_item_vlan vlan_spec = {
				.tci = rte_cpu_to_be_16(vlan),
			};
			struct rte_flow_item_vlan vlan_mask =
				rte_flow_item_vlan_mask;

			ret = mlx5_ctrl_flow_vlan(dev, &unicast,
						  &unicast_mask,
						  &vlan_spec,
						  &vlan_mask);
			if (ret)
				goto error;
		}
		if (!vlan_filter_n) {
			ret = mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
			if (ret)
				goto error;
		}
	}
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
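/*
 * Note on the spec/mask pairs above (illustrative): an all-zero spec with
 * an all-zero mask matches every packet (promiscuous), while a full
 * destination mask restricts the match to a single MAC, e.g.:
 *
 * @code
 *	struct rte_flow_item_eth spec = {
 *		.dst.addr_bytes = "\x01\x00\x5e\x00\x00\x01", // example MAC
 *	};
 *	struct rte_flow_item_eth mask = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", // exact match
 *	};
 * @endcode
 */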
/**
 * Disable traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device private data.
 */
void
mlx5_traffic_disable(struct rte_eth_dev *dev)
{
	mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_CTL, false);
}
/**
 * Restart traffic flows configured by control plane.
 *
 * @param dev
 *   Pointer to Ethernet device private data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_traffic_restart(struct rte_eth_dev *dev)
{
	if (dev->data->dev_started) {
		mlx5_traffic_disable(dev);
		return mlx5_traffic_enable(dev);
	}
	return 0;
}
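/*
 * Usage sketch (illustrative): other parts of the PMD are expected to call
 * mlx5_traffic_restart() after a change that invalidates the control flows;
 * the helper below is hypothetical:
 *
 * @code
 *	static int
 *	update_filters_and_restart(struct rte_eth_dev *dev)
 *	{
 *		// ...update MAC/VLAN related device data here...
 *		return mlx5_traffic_restart(dev);
 *	}
 * @endcode
 */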