/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */
#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include "mlx5_autoconf.h"

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_rxtx.h>
#include <mlx5_verbs.h>
#include <mlx5_utils.h>
#include <mlx5_malloc.h>
/**
 * Register a memory region. Given a protection domain pointer, a start
 * address and a length, register the memory region.
 *
 * @param[in] pd
 *   Pointer to protection domain context.
 * @param[in] addr
 *   Pointer to memory start address.
 * @param[in] length
 *   Length of the memory to register.
 * @param[out] pmd_mr
 *   pmd_mr struct set with lkey, address, length and pointer to mr object.
 *
 * @return
 *   0 on successful registration, -1 otherwise.
 */
static int
mlx5_reg_mr(void *pd, void *addr, size_t length,
	    struct mlx5_pmd_mr *pmd_mr)
{
	return mlx5_common_verbs_reg_mr(pd, addr, length, pmd_mr);
}
/**
 * Deregister a memory region. Given the mlx5 pmd MR, deregister it.
 *
 * @param[in] pmd_mr
 *   pmd_mr struct set with lkey, address, length and pointer to mr object.
 */
static void
mlx5_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
{
	mlx5_common_verbs_dereg_mr(pmd_mr);
}
/* verbs operations. */
const struct mlx5_verbs_ops mlx5_verbs_ops = {
	.reg_mr = mlx5_reg_mr,
	.dereg_mr = mlx5_dereg_mr,
};
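/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * holding a protection domain "pd" registers a buffer through the ops
 * table above and releases it afterwards; "buf" and "len" are
 * hypothetical.
 *
 *	struct mlx5_pmd_mr pmd_mr;
 *
 *	if (mlx5_verbs_ops.reg_mr(pd, buf, len, &pmd_mr) == 0) {
 *		// pmd_mr.lkey is now usable in WQE data segments.
 *		mlx5_verbs_ops.dereg_mr(&pmd_mr);
 *	}
 */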
/**
 * Modify Rx WQ vlan stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return 0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	uint16_t vlan_offloads =
		(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0);
	struct ibv_wq_attr mod;

	mod = (struct ibv_wq_attr){
		.attr_mask = IBV_WQ_ATTR_FLAGS,
		.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
		.flags = vlan_offloads,
	};

	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
}
/**
 * Modifies the attributes for the specified WQ.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 * @param type
 *   Type of change queue state.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_modify_wq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
	struct ibv_wq_attr mod = {
		.attr_mask = IBV_WQ_ATTR_STATE,
		.wq_state = (enum ibv_wq_state)type,
	};

	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
}
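/*
 * A minimal usage sketch (illustrative only): moving an Rx WQ to the
 * ready state after creation, as done in mlx5_rxq_ibv_obj_new() below.
 *
 *	ret = mlx5_ibv_modify_wq(rxq_obj, IBV_WQS_RDY);
 */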
/**
 * Modify QP using Verbs API.
 *
 * @param obj
 *   Verbs Tx queue object.
 * @param type
 *   Type of change queue state.
 * @param dev_port
 *   IB device port number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		   uint8_t dev_port)
{
	struct ibv_qp_attr mod = {
		.qp_state = IBV_QPS_RESET,
		.port_num = dev_port,
	};
	int attr_mask = (IBV_QP_STATE | IBV_QP_PORT);
	int ret = 0;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
		if (ret) {
			DRV_LOG(ERR, "Cannot change Tx QP state to RESET %s",
				strerror(errno));
			rte_errno = errno;
			return ret;
		}
		if (type == MLX5_TXQ_MOD_RDY2RST)
			return 0;
	}
	if (type == MLX5_TXQ_MOD_ERR2RDY)
		attr_mask = IBV_QP_STATE;
	mod.qp_state = IBV_QPS_INIT;
	ret = mlx5_glue->modify_qp(obj->qp, &mod, attr_mask);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	mod.qp_state = IBV_QPS_RTR;
	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	mod.qp_state = IBV_QPS_RTS;
	ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
	if (ret) {
		DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
			strerror(errno));
		rte_errno = errno;
		return ret;
	}
	return 0;
}
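/*
 * Usage sketch (illustrative only): bringing a newly created Tx QP to
 * ready chains RESET -> INIT -> RTR -> RTS internally, so a single call
 * is enough:
 *
 *	ret = mlx5_ibv_modify_qp(txq_obj, MLX5_TXQ_MOD_RST2RDY,
 *				 (uint8_t)priv->dev_port);
 */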
/**
 * Create a CQ Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct ibv_cq *
mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct {
		struct ibv_cq_init_attr_ex ibv;
		struct mlx5dv_cq_init_attr mlx5;
	} cq_attr;

	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
		.cqe = cqe_n,
		.channel = rxq_obj->ibv_channel,
		.comp_mask = 0,
	};
	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
		.comp_mask = 0,
	};
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
		cq_attr.mlx5.comp_mask |=
				MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		cq_attr.mlx5.cqe_comp_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
				MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
				MLX5DV_CQE_RES_FORMAT_HASH;
#else
		cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cq_attr.ibv.cqe *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (priv->config.cqe_pad) {
		cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
		cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
	}
#endif
	return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
							      &cq_attr.ibv,
							      &cq_attr.mlx5));
}
/**
 * Create a WQ Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs WQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct ibv_wq *
mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
	unsigned int wqe_n = 1 << rxq_data->elts_n;
	struct {
		struct ibv_wq_init_attr ibv;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		struct mlx5dv_wq_init_attr mlx5;
#endif
	} wq_attr;

	wq_attr.ibv = (struct ibv_wq_init_attr){
		.wq_context = NULL, /* Could be useful in the future. */
		.wq_type = IBV_WQT_RQ,
		/* Max number of outstanding WRs. */
		.max_wr = wqe_n >> rxq_data->sges_n,
		/* Max number of scatter/gather elements in a WR. */
		.max_sge = 1 << rxq_data->sges_n,
		.pd = priv->sh->pd,
		.cq = rxq_obj->ibv_cq,
		.comp_mask = IBV_WQ_INIT_ATTR_FLAGS,
		.create_flags = (rxq_data->vlan_strip ?
				 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
	};
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq_data->crc_present) {
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
	if (priv->config.hw_padding) {
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#endif
	}
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
		.comp_mask = 0,
	};
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		struct mlx5dv_striding_rq_init_attr *mprq_attr =
				&wq_attr.mlx5.striding_rq_attrs;

		wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
		*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
			.single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
			.single_wqe_log_num_of_strides = rxq_data->strd_num_n,
			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
		};
	}
	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
					      &wq_attr.mlx5);
#else
	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
#endif
	if (rxq_obj->wq) {
		/*
		 * Make sure number of WRs*SGEs match expectations since a
		 * queue cannot allocate more than "desc" buffers.
		 */
		if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
		    wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
			DRV_LOG(ERR,
				"Port %u Rx queue %u requested %u*%u but got"
				" %u*%u WRs*SGEs.",
				dev->data->port_id, idx,
				wqe_n >> rxq_data->sges_n,
				(1 << rxq_data->sges_n),
				wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
			rxq_obj->wq = NULL;
			rte_errno = EINVAL;
		}
	}
	return rxq_obj->wq;
}
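/*
 * Worked example (illustrative): with desc = 512 descriptors and
 * sges_n = 2 (4 segments per packet), wqe_n = 512, so the WQ is created
 * with max_wr = 512 >> 2 = 128 WRs of max_sge = 1 << 2 = 4 SGEs each;
 * the check above rejects a WQ that silently deviates from that shape.
 */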
/**
 * Create the Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_rwq rwq;
	int ret = 0;
	struct mlx5dv_obj obj;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
	priv->verbs_alloc_ctx.obj = rxq_ctrl;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		tmpl->ibv_channel =
				mlx5_glue->create_comp_channel(priv->sh->ctx);
		if (!tmpl->ibv_channel) {
			DRV_LOG(ERR, "Port %u: comp channel creation failure.",
				dev->data->port_id);
			rte_errno = ENOMEM;
			goto error;
		}
		tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
	}
	/* Create CQ using Verbs API. */
	tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(dev, idx);
	if (!tmpl->ibv_cq) {
		DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	obj.cq.in = tmpl->ibv_cq;
	obj.cq.out = &cq_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
	if (ret) {
		rte_errno = ret;
		goto error;
	}
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		DRV_LOG(ERR,
			"Port %u wrong MLX5_CQE_SIZE environment "
			"variable value: it should be set to %u.",
			dev->data->port_id, RTE_CACHE_LINE_SIZE);
		rte_errno = EINVAL;
		goto error;
	}
	/* Fill the rings. */
	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
	rxq_data->cq_db = cq_info.dbrec;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
	rxq_data->cq_uar = cq_info.cq_uar;
	rxq_data->cqn = cq_info.cqn;
	/* Create WQ (RQ) using Verbs API. */
	tmpl->wq = mlx5_rxq_ibv_wq_create(dev, idx);
	if (!tmpl->wq) {
		DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_ibv_modify_wq(tmpl, IBV_WQS_RDY);
	if (ret) {
		DRV_LOG(ERR,
			"Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
			dev->data->port_id, idx);
		rte_errno = ret;
		goto error;
	}
	obj.rwq.in = tmpl->wq;
	obj.rwq.out = &rwq;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
	if (ret) {
		rte_errno = ret;
		goto error;
	}
	rxq_data->wqes = rwq.buf;
	rxq_data->rq_db = rwq.dbrec;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->wq)
		claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
	if (tmpl->ibv_cq)
		claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
	if (tmpl->ibv_channel)
		claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
	rte_errno = ret; /* Restore rte_errno. */
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return -rte_errno;
}
/**
 * Release an Rx verbs queue object.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 */
static void
mlx5_rxq_ibv_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->wq);
	MLX5_ASSERT(rxq_obj->ibv_cq);
	claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
	claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
	if (rxq_obj->ibv_channel)
		claim_zero(mlx5_glue->destroy_comp_channel
						       (rxq_obj->ibv_channel));
}
/**
 * Get event for an Rx verbs queue object.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_ibv_get_event(struct mlx5_rxq_obj *rxq_obj)
{
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret = mlx5_glue->get_cq_event(rxq_obj->ibv_channel,
					  &ev_cq, &ev_ctx);

	if (ret < 0 || ev_cq != rxq_obj->ibv_cq)
		goto exit;
	mlx5_glue->ack_cq_events(rxq_obj->ibv_cq, 1);
	return 0;
exit:
	if (ret < 0)
		rte_errno = errno;
	else
		rte_errno = EINVAL;
	return -rte_errno;
}
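/*
 * Typical interrupt-mode flow (illustrative only): the Rx interrupt
 * enable path consumes any pending completion channel event via this
 * helper and then re-arms the CQ doorbell (mlx5_arm_cq() elsewhere in
 * the PMD) before waiting for the next interrupt.
 *
 *	if (mlx5_rx_ibv_get_event(rxq_obj) == 0)
 *		mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn);
 */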
/**
 * Creates a receive work queue array as a field of an indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   Verbs indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
		       struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_wq *wq[1 << log_n];
	unsigned int i, j;

	MLX5_ASSERT(ind_tbl);
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		wq[i] = rxq_ctrl->obj->wq;
	}
	MLX5_ASSERT(i > 0);
	/* Finalise indirection table. */
	for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
		wq[i] = wq[j];
	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table(priv->sh->ctx,
					&(struct ibv_rwq_ind_table_init_attr){
						.log_ind_tbl_size = log_n,
						.ind_tbl = wq,
						.comp_mask = 0,
					});
	if (!ind_tbl->ind_table) {
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}
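/*
 * Worked example (illustrative): with 3 configured queues and log_n = 2
 * the table holds 1 << 2 = 4 entries; the finalise loop above wraps the
 * tail around, yielding wq[] = { q0, q1, q2, q0 }, so RSS spreading
 * stays defined for every table slot.
 */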
/**
 * Destroys the specified Indirection Table.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_ibv_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
}
/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		  int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_qp *qp = NULL;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;
	int err;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	struct mlx5dv_qp_init_attr qp_init_attr;

	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	if (tunnel) {
		qp_init_attr.comp_mask =
				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
		qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	if (dev->data->dev_conf.lpbk_mode) {
		/* Allow packet sent from NIC loop back w/o source MAC check. */
		qp_init_attr.comp_mask |=
				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
		qp_init_attr.create_flags |=
				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
	}
#endif
	qp = mlx5_glue->dv_create_qp
		(priv->sh->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask =
				IBV_QP_INIT_ATTR_PD |
				IBV_QP_INIT_ATTR_IND_TABLE |
				IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function =
					IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = hrxq->rss_key_len,
				.rx_hash_key =
					(void *)(uintptr_t)rss_key,
				.rx_hash_fields_mask = hash_fields,
			},
			.rwq_ind_tbl = ind_tbl->ind_table,
			.pd = priv->sh->pd,
		 },
		 &qp_init_attr);
#else
	qp = mlx5_glue->create_qp_ex
		(priv->sh->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask =
				IBV_QP_INIT_ATTR_PD |
				IBV_QP_INIT_ATTR_IND_TABLE |
				IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function =
					IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = hrxq->rss_key_len,
				.rx_hash_key =
					(void *)(uintptr_t)rss_key,
				.rx_hash_fields_mask = hash_fields,
			},
			.rwq_ind_tbl = ind_tbl->ind_table,
			.pd = priv->sh->pd,
		 });
#endif
	if (!qp) {
		rte_errno = errno;
		goto error;
	}
	hrxq->qp = qp;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (qp)
		claim_zero(mlx5_glue->destroy_qp(qp));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
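/*
 * Illustrative input for the hash QP above: a caller populates hrxq
 * before invoking this routine, e.g. hashing on the IPv4 source and
 * destination addresses with a 40-byte Toeplitz key (values are an
 * example only, not the PMD defaults).
 *
 *	hrxq->rss_key_len = 40;
 *	hrxq->hash_fields = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;
 */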
/**
 * Destroy a Verbs queue pair.
 *
 * @param hrxq
 *   Hash Rx queue to release its qp.
 */
static void
mlx5_ibv_qp_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
}
/**
 * Release a drop Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;

	if (rxq->wq)
		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
	if (rxq->ibv_cq)
		claim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));
	mlx5_free(rxq);
	priv->drop_queue.rxq = NULL;
}
/**
 * Create a drop Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;

	if (rxq)
		return 0;
	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
	if (!rxq) {
		DEBUG("Port %u cannot allocate drop Rx queue memory.",
		      dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->drop_queue.rxq = rxq;
	rxq->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
	if (!rxq->ibv_cq) {
		DEBUG("Port %u cannot allocate CQ for drop queue.",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	rxq->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
						    .wq_type = IBV_WQT_RQ,
						    .max_wr = 1,
						    .max_sge = 1,
						    .pd = priv->sh->pd,
						    .cq = rxq->ibv_cq,
					      });
	if (!rxq->wq) {
		DEBUG("Port %u cannot allocate WQ for drop queue.",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	priv->drop_queue.rxq = rxq;
	return 0;
error:
	mlx5_rxq_ibv_obj_drop_release(dev);
	return -rte_errno;
}
/**
 * Create a Verbs drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	struct ibv_rwq_ind_table *ind_tbl = NULL;
	struct mlx5_rxq_obj *rxq;
	int ret;

	MLX5_ASSERT(hrxq && hrxq->ind_table);
	ret = mlx5_rxq_ibv_obj_drop_create(dev);
	if (ret < 0)
		goto error;
	rxq = priv->drop_queue.rxq;
	ind_tbl = mlx5_glue->create_rwq_ind_table
				(priv->sh->ctx,
				 &(struct ibv_rwq_ind_table_init_attr){
					.log_ind_tbl_size = 0,
					.ind_tbl = (struct ibv_wq **)&rxq->wq,
					.comp_mask = 0,
				 });
	if (!ind_tbl) {
		DEBUG("Port %u cannot allocate indirection table for drop"
		      " queue.", dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask = IBV_QP_INIT_ATTR_PD |
				     IBV_QP_INIT_ATTR_IND_TABLE |
				     IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
				.rx_hash_key = rss_hash_default_key,
				.rx_hash_fields_mask = 0,
			},
			.rwq_ind_tbl = ind_tbl,
			.pd = priv->sh->pd
		 });
	if (!hrxq->qp) {
		DEBUG("Port %u cannot allocate QP for drop queue.",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	hrxq->ind_table->ind_table = ind_tbl;
	return 0;
error:
	if (hrxq->qp)
		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
	if (ind_tbl)
		claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
	if (priv->drop_queue.rxq)
		mlx5_rxq_ibv_obj_drop_release(dev);
	return -rte_errno;
}
/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_ibv_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	struct ibv_rwq_ind_table *ind_tbl = hrxq->ind_table->ind_table;

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	claim_zero(mlx5_glue->destroy_flow_action(hrxq->action));
#endif
	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
	mlx5_rxq_ibv_obj_drop_release(dev);
}
/**
 * Create a QP Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   The QP Verbs object, NULL otherwise and rte_errno is set.
 */
static struct ibv_qp *
mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct ibv_qp *qp_obj = NULL;
	struct ibv_qp_init_attr_ex qp_attr = { 0 };
	const int desc = 1 << txq_data->elts_n;

	MLX5_ASSERT(txq_ctrl->obj->cq);
	/* CQ to be associated with the send queue. */
	qp_attr.send_cq = txq_ctrl->obj->cq;
	/* CQ to be associated with the receive queue. */
	qp_attr.recv_cq = txq_ctrl->obj->cq;
	/* Max number of outstanding WRs. */
	qp_attr.cap.max_send_wr = ((priv->sh->device_attr.max_qp_wr < desc) ?
				   priv->sh->device_attr.max_qp_wr : desc);
	/*
	 * Max number of scatter/gather elements in a WR, must be 1 to
	 * prevent libmlx5 from trying to affect too much memory. Tx gather
	 * is not impacted by the device_attr.max_sge limit and will still
	 * work properly.
	 */
	qp_attr.cap.max_send_sge = 1;
	qp_attr.qp_type = IBV_QPT_RAW_PACKET;
	/* Do *NOT* enable this, completions events are managed per Tx burst. */
	qp_attr.sq_sig_all = 0;
	qp_attr.pd = priv->sh->pd;
	qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;
	if (txq_data->inlen_send)
		qp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;
	if (txq_data->tso_en) {
		qp_attr.max_tso_header = txq_ctrl->max_tso_header;
		qp_attr.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
	}
	qp_obj = mlx5_glue->create_qp_ex(priv->sh->ctx, &qp_attr);
	if (qp_obj == NULL) {
		DRV_LOG(ERR, "Port %u Tx queue %u QP creation failure.",
			dev->data->port_id, idx);
		rte_errno = errno;
	}
	return qp_obj;
}
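/*
 * Worked example (illustrative): with elts_n = 10 the queue asks for
 * desc = 1 << 10 = 1024 send WRs; if the device caps max_qp_wr at, say,
 * 512, the clamp above requests 512 instead, and Tx gather still works
 * because max_send_sge is deliberately pinned to 1.
 */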
/**
 * Create the Tx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	unsigned int cqe_n;
	struct mlx5dv_qp qp;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_obj obj;
	const int desc = 1 << txq_data->elts_n;
	int ret = 0;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	txq_obj->txq_ctrl = txq_ctrl;
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_TX_QUEUE;
	priv->verbs_alloc_ctx.obj = txq_ctrl;
	if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
		DRV_LOG(ERR, "Port %u MLX5_ENABLE_CQE_COMPRESSION "
			"must never be set.", dev->data->port_id);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	cqe_n = desc / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
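	/*
	 * Sizing sketch (illustrative): one CQE is consumed per completion
	 * request rather than per packet, so a queue of "desc" descriptors
	 * needs roughly desc / MLX5_TX_COMP_THRESH + 1 CQEs, plus
	 * MLX5_TX_COMP_THRESH_INLINE_DIV entries of slack for inline-heavy
	 * bursts that raise the completion rate.
	 */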
	txq_obj->cq = mlx5_glue->create_cq(priv->sh->ctx, cqe_n, NULL, NULL, 0);
	if (txq_obj->cq == NULL) {
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	txq_obj->qp = mlx5_txq_ibv_qp_create(dev, idx);
	if (txq_obj->qp == NULL) {
		rte_errno = errno;
		goto error;
	}
	ret = mlx5_ibv_modify_qp(txq_obj, MLX5_TXQ_MOD_RST2RDY,
				 (uint8_t)priv->dev_port);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u QP state modifying failed.",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/* If using DevX, need additional mask to read tisn value. */
	if (priv->sh->devx && !priv->sh->tdn)
		qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
#endif
	obj.cq.in = txq_obj->cq;
	obj.cq.out = &cq_info;
	obj.qp.in = txq_obj->qp;
	obj.qp.out = &qp;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
	if (ret != 0) {
		rte_errno = errno;
		goto error;
	}
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		DRV_LOG(ERR,
			"Port %u wrong MLX5_CQE_SIZE environment variable"
			" value: it should be set to %u.",
			dev->data->port_id, RTE_CACHE_LINE_SIZE);
		rte_errno = EINVAL;
		goto error;
	}
	txq_data->cqe_n = log2above(cq_info.cqe_cnt);
	txq_data->cqe_s = 1 << txq_data->cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->qp_num_8s = ((struct ibv_qp *)txq_obj->qp)->qp_num << 8;
	txq_data->wqes = qp.sq.buf;
	txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
	txq_data->cq_db = cq_info.dbrec;
	txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/*
	 * If using DevX need to query and store TIS transport domain value.
	 * This is done once per port.
	 * Will use this value on Rx, when creating matching TIR.
	 */
	if (priv->sh->devx && !priv->sh->tdn) {
		ret = mlx5_devx_cmd_qp_query_tis_td(txq_obj->qp, qp.tisn,
						    &priv->sh->tdn);
		if (ret) {
			DRV_LOG(ERR, "Failed to query port %u Tx queue %u QP "
				"TIS transport domain.", dev->data->port_id,
				idx);
			rte_errno = EINVAL;
			goto error;
		} else {
			DRV_LOG(DEBUG, "Port %u Tx queue %u TIS number %d "
				"transport domain %d.", dev->data->port_id,
				idx, qp.tisn, priv->sh->tdn);
		}
	}
#endif
	txq_ctrl->bf_reg = qp.bf.reg;
	if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
		txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
		DRV_LOG(DEBUG, "Port %u: uar_mmap_offset 0x%" PRIx64 ".",
			dev->data->port_id, txq_ctrl->uar_mmap_offset);
	} else {
		DRV_LOG(ERR,
			"Port %u failed to retrieve UAR info, invalid"
			" libmlx5.so",
			dev->data->port_id);
		rte_errno = EINVAL;
		goto error;
	}
	txq_uar_init(txq_ctrl);
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (txq_obj->cq)
		claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
	if (txq_obj->qp)
		claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Release a Tx verbs queue object.
 *
 * @param txq_obj
 *   Verbs Tx queue object.
 */
void
mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj)
{
	MLX5_ASSERT(txq_obj);
	claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
	claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
}
struct mlx5_obj_ops ibv_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_wq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_ibv_obj_new,
	.rxq_event_get = mlx5_rx_ibv_get_event,
	.rxq_obj_modify = mlx5_ibv_modify_wq,
	.rxq_obj_release = mlx5_rxq_ibv_obj_release,
	.ind_table_new = mlx5_ibv_ind_table_new,
	.ind_table_destroy = mlx5_ibv_ind_table_destroy,
	.hrxq_new = mlx5_ibv_hrxq_new,
	.hrxq_destroy = mlx5_ibv_qp_destroy,
	.drop_action_create = mlx5_ibv_drop_action_create,
	.drop_action_destroy = mlx5_ibv_drop_action_destroy,
	.txq_obj_new = mlx5_txq_ibv_obj_new,
	.txq_obj_modify = mlx5_ibv_modify_qp,
	.txq_obj_release = mlx5_txq_ibv_obj_release,
};
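/*
 * Usage sketch (illustrative only): the probe path selects this ops
 * table when the Verbs backend is chosen over DevX queue objects, then
 * drives queue setup through it, e.g.:
 *
 *	priv->obj_ops = ibv_obj_ops;
 *	ret = priv->obj_ops.rxq_obj_new(dev, idx);
 */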