/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/queue.h>

#include "mlx5_autoconf.h"

#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_common.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_verbs.h>
#include <mlx5_rx.h>
#include <mlx5_tx.h>
#include <mlx5_utils.h>
#include <mlx5_malloc.h>

/**
 * Modify Rx WQ VLAN stripping offload.
 *
 * @param rxq
 *   Rx queue.
 * @param on
 *   Nonzero to enable VLAN stripping, zero to disable it.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
{
        uint16_t vlan_offloads = on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0;
        struct ibv_wq_attr mod;
        mod = (struct ibv_wq_attr){
                .attr_mask = IBV_WQ_ATTR_FLAGS,
                .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
                .flags = vlan_offloads,
        };

        return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
}

/**
 * Modify the state of the specified WQ using the Verbs API.
 *
 * @param rxq
 *   Verbs Rx queue.
 * @param type
 *   Target queue state.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_modify_wq(struct mlx5_rxq_priv *rxq, uint8_t type)
{
        struct ibv_wq_attr mod = {
                .attr_mask = IBV_WQ_ATTR_STATE,
                .wq_state = (enum ibv_wq_state)type,
        };

        return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
}

/**
 * Modify QP using Verbs API.
 *
 * @param obj
 *   Verbs Tx queue object.
 * @param type
 *   Requested type of queue state transition.
 * @param dev_port
 *   IB device port number.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
                   uint8_t dev_port)
{
        struct ibv_qp_attr mod = {
                .qp_state = IBV_QPS_RESET,
                .port_num = dev_port,
        };
        int attr_mask = (IBV_QP_STATE | IBV_QP_PORT);
        int ret;

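        /*
         * A Verbs QP walks the RESET -> INIT -> RTR -> RTS state sequence
         * before it can transmit. Any transition other than RST2RDY first
         * forces the QP back to RESET; RDY2RST stops there, while the other
         * request types continue down the ready path below.
         */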
        if (type != MLX5_TXQ_MOD_RST2RDY) {
                ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
                if (ret) {
                        DRV_LOG(ERR, "Cannot change Tx QP state to RESET %s",
                                strerror(errno));
                        rte_errno = errno;
                        return ret;
                }
                if (type == MLX5_TXQ_MOD_RDY2RST)
                        return 0;
        }
        if (type == MLX5_TXQ_MOD_ERR2RDY)
                attr_mask = IBV_QP_STATE;
        mod.qp_state = IBV_QPS_INIT;
        ret = mlx5_glue->modify_qp(obj->qp, &mod, attr_mask);
        if (ret) {
                DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
                        strerror(errno));
                rte_errno = errno;
                return ret;
        }
        mod.qp_state = IBV_QPS_RTR;
        ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
        if (ret) {
                DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
                        strerror(errno));
                rte_errno = errno;
                return ret;
        }
        mod.qp_state = IBV_QPS_RTS;
        ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
        if (ret) {
                DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
                        strerror(errno));
                rte_errno = errno;
                return ret;
        }
        return 0;
}

/**
 * Create a CQ Verbs object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   The Verbs CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct ibv_cq *
mlx5_rxq_ibv_cq_create(struct mlx5_rxq_priv *rxq)
{
        struct mlx5_priv *priv = rxq->priv;
        struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
        struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
        struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
        unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
        struct {
                struct ibv_cq_init_attr_ex ibv;
                struct mlx5dv_cq_init_attr mlx5;
        } cq_attr;

        cq_attr.ibv = (struct ibv_cq_init_attr_ex){
                .cqe = cqe_n,
                .channel = rxq_obj->ibv_channel,
                .comp_mask = 0,
        };
        cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
                .comp_mask = 0,
        };
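        /*
         * CQE compression packs several completions into one CQE slot as
         * mini-CQEs. The compressed entries carry no timestamp field, hence
         * compression is skipped when HW timestamping is enabled.
         */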
        if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
                cq_attr.mlx5.comp_mask |=
                                MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
                rxq_data->byte_mask = UINT32_MAX;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
                if (mlx5_rxq_mprq_enabled(rxq_data)) {
                        cq_attr.mlx5.cqe_comp_res_format =
                                        MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX;
                        rxq_data->mcqe_format =
                                        MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
                } else {
                        cq_attr.mlx5.cqe_comp_res_format =
                                        MLX5DV_CQE_RES_FORMAT_HASH;
                        rxq_data->mcqe_format =
                                        MLX5_CQE_RESP_FORMAT_HASH;
                }
#else
                cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
                rxq_data->mcqe_format = MLX5_CQE_RESP_FORMAT_HASH;
#endif
                /*
                 * For vectorized Rx the CQE count must not be doubled, so
                 * that cq_ci and rq_ci remain aligned. Double it only when
                 * the vector path is not supported.
                 */
                if (mlx5_rxq_check_vec_support(rxq_data) < 0)
                        cq_attr.ibv.cqe *= 2;
        } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
                DRV_LOG(DEBUG,
                        "Port %u Rx CQE compression is disabled for HW"
                        " timestamp.",
                        priv->dev_data->port_id);
        }
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
        if (RTE_CACHE_LINE_SIZE == 128) {
                cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
                cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
        }
#endif
        return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq
                                                   (priv->sh->cdev->ctx,
                                                    &cq_attr.ibv,
                                                    &cq_attr.mlx5));
}

/**
 * Create a WQ Verbs object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   The Verbs WQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct ibv_wq *
mlx5_rxq_ibv_wq_create(struct mlx5_rxq_priv *rxq)
{
        struct mlx5_priv *priv = rxq->priv;
        struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
        struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
        struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
        unsigned int wqe_n = 1 << rxq_data->elts_n;
        struct {
                struct ibv_wq_init_attr ibv;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
                struct mlx5dv_wq_init_attr mlx5;
#endif
        } wq_attr;

        wq_attr.ibv = (struct ibv_wq_init_attr){
                .wq_context = NULL, /* Could be useful in the future. */
                .wq_type = IBV_WQT_RQ,
                /* Max number of outstanding WRs. */
                .max_wr = wqe_n >> rxq_data->sges_n,
                /* Max number of scatter/gather elements in a WR. */
                .max_sge = 1 << rxq_data->sges_n,
                .pd = priv->sh->cdev->pd,
                .cq = rxq_obj->ibv_cq,
                .comp_mask = IBV_WQ_INIT_ATTR_FLAGS,
                .create_flags = (rxq_data->vlan_strip ?
                                 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
        };
        /* By default, FCS (CRC) is stripped by hardware. */
        if (rxq_data->crc_present) {
                wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
                wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
        }
        if (priv->config.hw_padding) {
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
                wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
                wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
                wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
                wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#endif
        }
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
        wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
                .comp_mask = 0,
        };
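        /*
         * Multi-Packet RQ (striding RQ): each WQE posts one large buffer
         * carved into fixed-size strides, so several packets can land in a
         * single descriptor and fewer WQEs have to be replenished.
         */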
        if (mlx5_rxq_mprq_enabled(rxq_data)) {
                struct mlx5dv_striding_rq_init_attr *mprq_attr =
                                                &wq_attr.mlx5.striding_rq_attrs;

                wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
                *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
                        .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
                        .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
                        .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
                };
        }
        rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->cdev->ctx, &wq_attr.ibv,
                                              &wq_attr.mlx5);
#else
        rxq_obj->wq = mlx5_glue->create_wq(priv->sh->cdev->ctx, &wq_attr.ibv);
#endif
        if (rxq_obj->wq) {
                /*
                 * Make sure number of WRs*SGEs match expectations since a queue
                 * cannot allocate more than "desc" buffers.
                 */
                if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
                    wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
                        DRV_LOG(ERR,
                                "Port %u Rx queue %u requested %u*%u but got"
                                " %u*%u WRs*SGEs.",
                                priv->dev_data->port_id, rxq->idx,
                                wqe_n >> rxq_data->sges_n,
                                (1 << rxq_data->sges_n),
                                wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
                        claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
                        rxq_obj->wq = NULL;
                        rte_errno = EINVAL;
                }
        }
        return rxq_obj->wq;
}

/**
 * Create the Rx queue Verbs object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ibv_obj_new(struct mlx5_rxq_priv *rxq)
{
        uint16_t idx = rxq->idx;
        struct mlx5_priv *priv = rxq->priv;
        uint16_t port_id = priv->dev_data->port_id;
        struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
        struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
        struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
        struct mlx5dv_cq cq_info;
        struct mlx5dv_rwq rwq;
        int ret = 0;
        struct mlx5dv_obj obj;

        MLX5_ASSERT(rxq_data);
        MLX5_ASSERT(tmpl);
        tmpl->rxq_ctrl = rxq_ctrl;
        if (rxq_ctrl->irq) {
                tmpl->ibv_channel =
                        mlx5_glue->create_comp_channel(priv->sh->cdev->ctx);
                if (!tmpl->ibv_channel) {
                        DRV_LOG(ERR, "Port %u: comp channel creation failure.",
                                port_id);
                        rte_errno = ENOMEM;
                        goto error;
                }
                tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
        }
        /* Create CQ using Verbs API. */
        tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(rxq);
        if (!tmpl->ibv_cq) {
                DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
                        port_id, idx);
                rte_errno = ENOMEM;
                goto error;
        }
        obj.cq.in = tmpl->ibv_cq;
        obj.cq.out = &cq_info;
        ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
        if (ret) {
                rte_errno = ret;
                goto error;
        }
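        /*
         * The datapath assumes one CQE per cache line. libmlx5 lets the
         * MLX5_CQE_SIZE environment variable override the CQE size, so any
         * mismatch must be rejected here.
         */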
        if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
                DRV_LOG(ERR,
                        "Port %u wrong MLX5_CQE_SIZE environment "
                        "variable value: it should be set to %u.",
                        port_id, RTE_CACHE_LINE_SIZE);
                rte_errno = EINVAL;
                goto error;
        }
        /* Fill the rings. */
        rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
        rxq_data->cq_db = cq_info.dbrec;
        rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
        rxq_data->cq_uar = cq_info.cq_uar;
        rxq_data->cqn = cq_info.cqn;
        /* Create WQ (RQ) using Verbs API. */
        tmpl->wq = mlx5_rxq_ibv_wq_create(rxq);
        if (!tmpl->wq) {
                DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
                        port_id, idx);
                rte_errno = ENOMEM;
                goto error;
        }
        /* Change queue state to ready. */
        ret = mlx5_ibv_modify_wq(rxq, IBV_WQS_RDY);
        if (ret) {
                DRV_LOG(ERR,
                        "Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
                        port_id, idx);
                rte_errno = ret;
                goto error;
        }
        obj.rwq.in = tmpl->wq;
        obj.rwq.out = &rwq;
        ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
        if (ret) {
                rte_errno = ret;
                goto error;
        }
        rxq_data->wqes = rwq.buf;
        rxq_data->rq_db = rwq.dbrec;
        rxq_data->cq_arm_sn = 0;
        mlx5_rxq_initialize(rxq_data);
        rxq_data->cq_ci = 0;
        priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        if (tmpl->wq)
                claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
        if (tmpl->ibv_cq)
                claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
        if (tmpl->ibv_channel)
                claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Release an Rx verbs queue object.
 *
 * @param rxq
 *   Pointer to Rx queue.
 */
static void
mlx5_rxq_ibv_obj_release(struct mlx5_rxq_priv *rxq)
{
        struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;

        MLX5_ASSERT(rxq_obj);
        MLX5_ASSERT(rxq_obj->wq);
        MLX5_ASSERT(rxq_obj->ibv_cq);
        claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
        claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
        if (rxq_obj->ibv_channel)
                claim_zero(mlx5_glue->destroy_comp_channel
                                                        (rxq_obj->ibv_channel));
}

/**
 * Get event for an Rx verbs queue object.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_ibv_get_event(struct mlx5_rxq_obj *rxq_obj)
{
        struct ibv_cq *ev_cq;
        void *ev_ctx;
        int ret = mlx5_glue->get_cq_event(rxq_obj->ibv_channel,
                                          &ev_cq, &ev_ctx);

        if (ret < 0 || ev_cq != rxq_obj->ibv_cq)
                goto exit;
        mlx5_glue->ack_cq_events(rxq_obj->ibv_cq, 1);
        return 0;
exit:
        if (ret < 0)
                rte_errno = errno;
        else
                rte_errno = EINVAL;
        return -rte_errno;
}

/**
 * Create an indirection table of receive work queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   Verbs indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
                       struct mlx5_ind_table_obj *ind_tbl)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct ibv_wq *wq[1 << log_n];
        unsigned int i, j;

        MLX5_ASSERT(ind_tbl);
        for (i = 0; i != ind_tbl->queues_n; ++i) {
                struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                                container_of(rxq, struct mlx5_rxq_ctrl, rxq);

                wq[i] = rxq_ctrl->obj->wq;
        }
        MLX5_ASSERT(i > 0);
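        /*
         * The Verbs indirection table holds exactly 2^log_n entries, which
         * may exceed the number of configured queues; pad the tail by
         * cycling through the existing WQs so RSS covers every slot.
         */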
        /* Finalise indirection table. */
        for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
                wq[i] = wq[j];
        ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
                                        (priv->sh->cdev->ctx,
                                         &(struct ibv_rwq_ind_table_init_attr){
                                                 .log_ind_tbl_size = log_n,
                                                 .ind_tbl = wq,
                                                 .comp_mask = 0,
                                         });
        if (!ind_tbl->ind_table) {
                rte_errno = errno;
                return -rte_errno;
        }
        return 0;
}

/**
 * Destroys the specified Indirection Table.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_ibv_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
        claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                  int tunnel __rte_unused)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct ibv_qp *qp = NULL;
        struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
        const uint8_t *rss_key = hrxq->rss_key;
        uint64_t hash_fields = hrxq->hash_fields;
        int err;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        struct mlx5dv_qp_init_attr qp_init_attr;

        memset(&qp_init_attr, 0, sizeof(qp_init_attr));
        if (tunnel) {
                qp_init_attr.comp_mask =
                                       MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
                qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
        }
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        if (dev->data->dev_conf.lpbk_mode) {
                /* Allow packets sent from NIC loopback without source MAC check. */
                qp_init_attr.comp_mask |=
                                MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
                qp_init_attr.create_flags |=
                                MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
        }
#endif
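        /*
         * The hash Rx queue is a RAW_PACKET QP carrying only an RX-hash
         * configuration: received packets are Toeplitz-hashed over the
         * selected fields and spread across the WQs of the attached
         * indirection table.
         */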
        qp = mlx5_glue->dv_create_qp
                        (priv->sh->cdev->ctx,
                         &(struct ibv_qp_init_attr_ex){
                                .qp_type = IBV_QPT_RAW_PACKET,
                                .comp_mask =
                                        IBV_QP_INIT_ATTR_PD |
                                        IBV_QP_INIT_ATTR_IND_TABLE |
                                        IBV_QP_INIT_ATTR_RX_HASH,
                                .rx_hash_conf = (struct ibv_rx_hash_conf){
                                        .rx_hash_function =
                                                IBV_RX_HASH_FUNC_TOEPLITZ,
                                        .rx_hash_key_len = hrxq->rss_key_len,
                                        .rx_hash_key =
                                                (void *)(uintptr_t)rss_key,
                                        .rx_hash_fields_mask = hash_fields,
                                },
                                .rwq_ind_tbl = ind_tbl->ind_table,
                                .pd = priv->sh->cdev->pd,
                          },
                          &qp_init_attr);
#else
        qp = mlx5_glue->create_qp_ex
                        (priv->sh->cdev->ctx,
                         &(struct ibv_qp_init_attr_ex){
                                .qp_type = IBV_QPT_RAW_PACKET,
                                .comp_mask =
                                        IBV_QP_INIT_ATTR_PD |
                                        IBV_QP_INIT_ATTR_IND_TABLE |
                                        IBV_QP_INIT_ATTR_RX_HASH,
                                .rx_hash_conf = (struct ibv_rx_hash_conf){
                                        .rx_hash_function =
                                                IBV_RX_HASH_FUNC_TOEPLITZ,
                                        .rx_hash_key_len = hrxq->rss_key_len,
                                        .rx_hash_key =
                                                (void *)(uintptr_t)rss_key,
                                        .rx_hash_fields_mask = hash_fields,
                                },
                                .rwq_ind_tbl = ind_tbl->ind_table,
                                .pd = priv->sh->cdev->pd,
                         });
#endif
        if (!qp) {
                rte_errno = errno;
                goto error;
        }
        hrxq->qp = qp;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
        if (!hrxq->action) {
                rte_errno = errno;
                goto error;
        }
#endif
        return 0;
error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        if (qp)
                claim_zero(mlx5_glue->destroy_qp(qp));
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
}

/**
 * Destroy a Verbs queue pair.
 *
 * @param hrxq
 *   Hash Rx queue whose QP will be released.
 */
static void
mlx5_ibv_qp_destroy(struct mlx5_hrxq *hrxq)
{
        claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
}

/**
 * Release a drop Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
        struct mlx5_rxq_obj *rxq_obj;

        if (rxq == NULL)
                return;
        if (rxq->ctrl == NULL)
                goto free_priv;
        rxq_obj = rxq->ctrl->obj;
        if (rxq_obj == NULL)
                goto free_ctrl;
        if (rxq_obj->wq)
                claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
        if (rxq_obj->ibv_cq)
                claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
        mlx5_free(rxq_obj);
free_ctrl:
        mlx5_free(rxq->ctrl);
free_priv:
        mlx5_free(rxq);
        priv->drop_queue.rxq = NULL;
}

/**
 * Create a drop Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct ibv_context *ctx = priv->sh->cdev->ctx;
        struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
        struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
        struct mlx5_rxq_obj *rxq_obj = NULL;

        if (rxq != NULL)
                return 0;
        rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
        if (rxq == NULL) {
                DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
                      dev->data->port_id);
                rte_errno = ENOMEM;
                return -rte_errno;
        }
        priv->drop_queue.rxq = rxq;
        rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl), 0,
                               SOCKET_ID_ANY);
        if (rxq_ctrl == NULL) {
                DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue control memory.",
                      dev->data->port_id);
                rte_errno = ENOMEM;
                goto error;
        }
        rxq->ctrl = rxq_ctrl;
        rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0,
                              SOCKET_ID_ANY);
        if (rxq_obj == NULL) {
                DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
                      dev->data->port_id);
                rte_errno = ENOMEM;
                goto error;
        }
        rxq_ctrl->obj = rxq_obj;
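        /*
         * The drop queue never delivers traffic to software, so the smallest
         * possible resources are enough: a single-entry CQ and a WQ with one
         * WR and one SGE.
         */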
        rxq_obj->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
        if (!rxq_obj->ibv_cq) {
                DRV_LOG(DEBUG, "Port %u cannot allocate CQ for drop queue.",
                      dev->data->port_id);
                rte_errno = errno;
                goto error;
        }
        rxq_obj->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
                                                    .wq_type = IBV_WQT_RQ,
                                                    .max_wr = 1,
                                                    .max_sge = 1,
                                                    .pd = priv->sh->cdev->pd,
                                                    .cq = rxq_obj->ibv_cq,
                                              });
        if (!rxq_obj->wq) {
                DRV_LOG(DEBUG, "Port %u cannot allocate WQ for drop queue.",
                      dev->data->port_id);
                rte_errno = errno;
                goto error;
        }
        return 0;
error:
        mlx5_rxq_ibv_obj_drop_release(dev);
        return -rte_errno;
}

/**
 * Create a Verbs drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
        struct ibv_rwq_ind_table *ind_tbl = NULL;
        struct mlx5_rxq_obj *rxq;
        int ret;

        MLX5_ASSERT(hrxq && hrxq->ind_table);
        ret = mlx5_rxq_ibv_obj_drop_create(dev);
        if (ret < 0)
                goto error;
        rxq = priv->drop_queue.rxq->ctrl->obj;
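        /* A single-entry indirection table over the drop WQ backs the hash queue. */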
        ind_tbl = mlx5_glue->create_rwq_ind_table
                                (priv->sh->cdev->ctx,
                                 &(struct ibv_rwq_ind_table_init_attr){
                                        .log_ind_tbl_size = 0,
                                        .ind_tbl = (struct ibv_wq **)&rxq->wq,
                                        .comp_mask = 0,
                                 });
        if (!ind_tbl) {
                DRV_LOG(DEBUG, "Port %u"
                        " cannot allocate indirection table for drop queue.",
                        dev->data->port_id);
                rte_errno = errno;
                goto error;
        }
        hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx,
                 &(struct ibv_qp_init_attr_ex){
                        .qp_type = IBV_QPT_RAW_PACKET,
                        .comp_mask = IBV_QP_INIT_ATTR_PD |
                                     IBV_QP_INIT_ATTR_IND_TABLE |
                                     IBV_QP_INIT_ATTR_RX_HASH,
                        .rx_hash_conf = (struct ibv_rx_hash_conf){
                                .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
                                .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
                                .rx_hash_key = rss_hash_default_key,
                                .rx_hash_fields_mask = 0,
                                },
                        .rwq_ind_tbl = ind_tbl,
                        .pd = priv->sh->cdev->pd
                 });
        if (!hrxq->qp) {
                DRV_LOG(DEBUG, "Port %u cannot allocate QP for drop queue.",
                      dev->data->port_id);
                rte_errno = errno;
                goto error;
        }
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
        if (!hrxq->action) {
                rte_errno = errno;
                goto error;
        }
#endif
        hrxq->ind_table->ind_table = ind_tbl;
        return 0;
error:
        if (hrxq->qp)
                claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
        if (ind_tbl)
                claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
        if (priv->drop_queue.rxq)
                mlx5_rxq_ibv_obj_drop_release(dev);
        return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_ibv_drop_action_destroy(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
        struct ibv_rwq_ind_table *ind_tbl = hrxq->ind_table->ind_table;

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        claim_zero(mlx5_glue->destroy_flow_action(hrxq->action));
#endif
        claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
        claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
        mlx5_rxq_ibv_obj_drop_release(dev);
}

/**
 * Create a QP Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   The QP Verbs object, NULL otherwise and rte_errno is set.
 */
static struct ibv_qp *
mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                        container_of(txq_data, struct mlx5_txq_ctrl, txq);
        struct ibv_qp *qp_obj = NULL;
        struct ibv_qp_init_attr_ex qp_attr = { 0 };
        const int desc = 1 << txq_data->elts_n;

        MLX5_ASSERT(txq_ctrl->obj->cq);
        /* CQ to be associated with the send queue. */
        qp_attr.send_cq = txq_ctrl->obj->cq;
        /* CQ to be associated with the receive queue. */
        qp_attr.recv_cq = txq_ctrl->obj->cq;
        /* Max number of outstanding WRs. */
        qp_attr.cap.max_send_wr = ((priv->sh->device_attr.max_qp_wr < desc) ?
                                   priv->sh->device_attr.max_qp_wr : desc);
        /*
         * Max number of scatter/gather elements in a WR, must be 1 to
         * prevent libmlx5 from trying to affect too much memory. TX gather
         * is not impacted by the device_attr.max_sge limit and will still
         * work properly.
         */
        qp_attr.cap.max_send_sge = 1;
        qp_attr.qp_type = IBV_QPT_RAW_PACKET;
        /* Do *NOT* enable this, completion events are managed per Tx burst. */
        qp_attr.sq_sig_all = 0;
        qp_attr.pd = priv->sh->cdev->pd;
        qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;
        if (txq_data->inlen_send)
                qp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;
        if (txq_data->tso_en) {
                qp_attr.max_tso_header = txq_ctrl->max_tso_header;
                qp_attr.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
        }
        qp_obj = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx, &qp_attr);
        if (qp_obj == NULL) {
                DRV_LOG(ERR, "Port %u Tx queue %u QP creation failure.",
                        dev->data->port_id, idx);
                rte_errno = errno;
        }
        return qp_obj;
}

/**
 * Create the Tx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                container_of(txq_data, struct mlx5_txq_ctrl, txq);
        struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
        unsigned int cqe_n;
        struct mlx5dv_qp qp;
        struct mlx5dv_cq cq_info;
        struct mlx5dv_obj obj;
        const int desc = 1 << txq_data->elts_n;
        int ret = 0;

        MLX5_ASSERT(txq_data);
        MLX5_ASSERT(txq_obj);
        txq_obj->txq_ctrl = txq_ctrl;
        if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
                DRV_LOG(ERR, "Port %u MLX5_ENABLE_CQE_COMPRESSION "
                        "must never be set.", dev->data->port_id);
                rte_errno = EINVAL;
                return -rte_errno;
        }
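        /*
         * Tx completions are requested at most once per MLX5_TX_COMP_THRESH
         * descriptors, so the CQ only needs desc / MLX5_TX_COMP_THRESH
         * entries plus some slack for inline and early flushes.
         */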
        cqe_n = desc / MLX5_TX_COMP_THRESH +
                1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
        txq_obj->cq = mlx5_glue->create_cq(priv->sh->cdev->ctx, cqe_n,
                                           NULL, NULL, 0);
        if (txq_obj->cq == NULL) {
                DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
                        dev->data->port_id, idx);
                rte_errno = errno;
                goto error;
        }
        txq_obj->qp = mlx5_txq_ibv_qp_create(dev, idx);
        if (txq_obj->qp == NULL) {
                rte_errno = errno;
                goto error;
        }
        ret = mlx5_ibv_modify_qp(txq_obj, MLX5_TXQ_MOD_RST2RDY,
                                 (uint8_t)priv->dev_port);
        if (ret) {
                DRV_LOG(ERR, "Port %u Tx queue %u QP state modifying failed.",
                        dev->data->port_id, idx);
                rte_errno = errno;
                goto error;
        }
        qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        /* If using DevX, need additional mask to read tisn value. */
        if (priv->sh->devx && !priv->sh->tdn)
                qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
#endif
        obj.cq.in = txq_obj->cq;
        obj.cq.out = &cq_info;
        obj.qp.in = txq_obj->qp;
        obj.qp.out = &qp;
        ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
        if (ret != 0) {
                rte_errno = errno;
                goto error;
        }
        if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
                DRV_LOG(ERR,
                        "Port %u wrong MLX5_CQE_SIZE environment variable"
                        " value: it should be set to %u.",
                        dev->data->port_id, RTE_CACHE_LINE_SIZE);
                rte_errno = EINVAL;
                goto error;
        }
        txq_data->cqe_n = log2above(cq_info.cqe_cnt);
        txq_data->cqe_s = 1 << txq_data->cqe_n;
        txq_data->cqe_m = txq_data->cqe_s - 1;
        txq_data->qp_num_8s = ((struct ibv_qp *)txq_obj->qp)->qp_num << 8;
        txq_data->wqes = qp.sq.buf;
        txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
        txq_data->wqe_s = 1 << txq_data->wqe_n;
        txq_data->wqe_m = txq_data->wqe_s - 1;
        txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
        txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
        txq_data->cq_db = cq_info.dbrec;
        txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
        txq_data->cq_ci = 0;
        txq_data->cq_pi = 0;
        txq_data->wqe_ci = 0;
        txq_data->wqe_pi = 0;
        txq_data->wqe_comp = 0;
        txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        /*
         * If using DevX, the TIS transport domain value has to be queried
         * and stored. This is done once per port; the value is later used
         * on Rx when creating the matching TIR.
         */
        if (priv->sh->devx && !priv->sh->tdn) {
                ret = mlx5_devx_cmd_qp_query_tis_td(txq_obj->qp, qp.tisn,
                                                    &priv->sh->tdn);
                if (ret) {
                        DRV_LOG(ERR, "Failed to query port %u Tx queue %u QP "
                                "TIS transport domain.", dev->data->port_id,
                                idx);
                        rte_errno = EINVAL;
                        goto error;
                } else {
                        DRV_LOG(DEBUG, "Port %u Tx queue %u TIS number %d "
                                "transport domain %d.", dev->data->port_id,
                                idx, qp.tisn, priv->sh->tdn);
                }
        }
#endif
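        /*
         * bf.reg is the BlueFlame/doorbell register of the queue's UAR page.
         * The UAR mmap offset is required so that secondary processes can
         * map the same page into their own address space.
         */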
        txq_ctrl->bf_reg = qp.bf.reg;
        if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
                txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
                DRV_LOG(DEBUG, "Port %u: uar_mmap_offset 0x%" PRIx64 ".",
                        dev->data->port_id, txq_ctrl->uar_mmap_offset);
        } else {
                DRV_LOG(ERR,
                        "Port %u failed to retrieve UAR info, invalid"
                        " libmlx5.so",
                        dev->data->port_id);
                rte_errno = EINVAL;
                goto error;
        }
        txq_uar_init(txq_ctrl);
        dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        if (txq_obj->cq)
                claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
        if (txq_obj->qp)
                claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}

/*
 * Create the dummy QP with minimal resources for loopback.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
{
#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct ibv_context *ctx = sh->cdev->ctx;
        struct mlx5dv_qp_init_attr qp_init_attr = {0};
        struct {
                struct ibv_cq_init_attr_ex ibv;
                struct mlx5dv_cq_init_attr mlx5;
        } cq_attr = {{0}};

        if (dev->data->dev_conf.lpbk_mode) {
                /* Allow packets sent from NIC loopback without source MAC check. */
                qp_init_attr.comp_mask |=
                                MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
                qp_init_attr.create_flags |=
                                MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
        } else {
                return 0;
        }
        /* Only the refcnt needs checking; it is 0 right after "sh" is allocated. */
        if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
                MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
                priv->lb_used = 1;
                return 0;
        }
        cq_attr.ibv = (struct ibv_cq_init_attr_ex){
                .cqe = 1,
                .channel = NULL,
                .comp_mask = 0,
        };
        cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
                .comp_mask = 0,
        };
        /* Only CQ is needed, no WQ(RQ) is required in this case. */
        sh->self_lb.ibv_cq = mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(ctx,
                                                        &cq_attr.ibv,
                                                        &cq_attr.mlx5));
        if (!sh->self_lb.ibv_cq) {
                DRV_LOG(ERR, "Port %u cannot allocate CQ for loopback.",
                        dev->data->port_id);
                rte_errno = errno;
                goto error;
        }
        sh->self_lb.qp = mlx5_glue->dv_create_qp(ctx,
                                &(struct ibv_qp_init_attr_ex){
                                        .qp_type = IBV_QPT_RAW_PACKET,
                                        .comp_mask = IBV_QP_INIT_ATTR_PD,
                                        .pd = sh->cdev->pd,
                                        .send_cq = sh->self_lb.ibv_cq,
                                        .recv_cq = sh->self_lb.ibv_cq,
                                        .cap.max_recv_wr = 1,
                                },
                                &qp_init_attr);
        if (!sh->self_lb.qp) {
                DRV_LOG(DEBUG, "Port %u cannot allocate QP for loopback.",
                        dev->data->port_id);
                rte_errno = errno;
                goto error;
        }
        priv->lb_used = 1;
        return 0;
error:
        if (sh->self_lb.ibv_cq) {
                claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
                sh->self_lb.ibv_cq = NULL;
        }
        (void)__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
        return -rte_errno;
#else
        RTE_SET_USED(dev);
        return 0;
#endif
}

/*
 * Release the dummy queue resources for loopback.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
void
mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev)
{
#if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;

        if (!priv->lb_used)
                return;
        MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
        if (!(__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
                if (sh->self_lb.qp) {
                        claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
                        sh->self_lb.qp = NULL;
                }
                if (sh->self_lb.ibv_cq) {
                        claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
                        sh->self_lb.ibv_cq = NULL;
                }
        }
        priv->lb_used = 0;
#else
        RTE_SET_USED(dev);
        return;
#endif
}

/**
 * Release a Tx verbs queue object.
 *
 * @param txq_obj
 *   Verbs Tx queue object.
 */
void
mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj)
{
        MLX5_ASSERT(txq_obj);
        claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
        claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
}

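/*
 * Verbs implementation of the mlx5 queue object operations, used when the
 * queue objects are not created through DevX. The dummy loopback hooks are
 * left NULL here; the helpers defined above are instead referenced by the
 * DevX variant of this table.
 */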
struct mlx5_obj_ops ibv_obj_ops = {
        .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_wq_vlan_strip,
        .rxq_obj_new = mlx5_rxq_ibv_obj_new,
        .rxq_event_get = mlx5_rx_ibv_get_event,
        .rxq_obj_modify = mlx5_ibv_modify_wq,
        .rxq_obj_release = mlx5_rxq_ibv_obj_release,
        .ind_table_new = mlx5_ibv_ind_table_new,
        .ind_table_destroy = mlx5_ibv_ind_table_destroy,
        .hrxq_new = mlx5_ibv_hrxq_new,
        .hrxq_destroy = mlx5_ibv_qp_destroy,
        .drop_action_create = mlx5_ibv_drop_action_create,
        .drop_action_destroy = mlx5_ibv_drop_action_destroy,
        .txq_obj_new = mlx5_txq_ibv_obj_new,
        .txq_obj_modify = mlx5_ibv_modify_qp,
        .txq_obj_release = mlx5_txq_ibv_obj_release,
        .lb_dummy_queue_create = NULL,
        .lb_dummy_queue_release = NULL,
};