net/mlx5: remove Rx queue data list from device
drivers/net/mlx5/linux/mlx5_verbs.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  */
4
5 #include <stddef.h>
6 #include <errno.h>
7 #include <string.h>
8 #include <stdint.h>
9 #include <unistd.h>
10 #include <inttypes.h>
11 #include <sys/queue.h>
12
13 #include "mlx5_autoconf.h"
14
15 #include <rte_mbuf.h>
16 #include <rte_malloc.h>
17 #include <ethdev_driver.h>
18 #include <rte_common.h>
19
20 #include <mlx5_glue.h>
21 #include <mlx5_common.h>
22 #include <mlx5_common_mr.h>
23 #include <mlx5_verbs.h>
24 #include <mlx5_rx.h>
25 #include <mlx5_tx.h>
26 #include <mlx5_utils.h>
27 #include <mlx5_malloc.h>
28
/**
 * Modify Rx WQ VLAN stripping offload.
 *
 * @param rxq
 *   Rx queue.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
37 static int
38 mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_priv *rxq, int on)
39 {
40         uint16_t vlan_offloads =
41                 (on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
42                 0;
43         struct ibv_wq_attr mod;
44         mod = (struct ibv_wq_attr){
45                 .attr_mask = IBV_WQ_ATTR_FLAGS,
46                 .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
47                 .flags = vlan_offloads,
48         };
49
50         return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
51 }
52
53 /**
54  * Modifies the attributes for the specified WQ.
55  *
56  * @param rxq
57  *   Verbs Rx queue.
 * @param type
 *   Type of queue state change.
60  *
61  * @return
62  *   0 on success, a negative errno value otherwise and rte_errno is set.
63  */
64 static int
65 mlx5_ibv_modify_wq(struct mlx5_rxq_priv *rxq, uint8_t type)
66 {
67         struct ibv_wq_attr mod = {
68                 .attr_mask = IBV_WQ_ATTR_STATE,
69                 .wq_state = (enum ibv_wq_state)type,
70         };
71
72         return mlx5_glue->modify_wq(rxq->ctrl->obj->wq, &mod);
73 }
74
75 /**
76  * Modify QP using Verbs API.
77  *
78  * @param txq_obj
79  *   Verbs Tx queue object.
 * @param type
 *   Type of queue state change.
82  * @param dev_port
83  *   IB device port number.
84  *
85  * @return
86  *   0 on success, a negative errno value otherwise and rte_errno is set.
87  */
88 static int
89 mlx5_ibv_modify_qp(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
90                    uint8_t dev_port)
91 {
92         struct ibv_qp_attr mod = {
93                 .qp_state = IBV_QPS_RESET,
94                 .port_num = dev_port,
95         };
96         int attr_mask = (IBV_QP_STATE | IBV_QP_PORT);
97         int ret;
98
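        /*
         * Walk the QP through the standard RESET -> INIT -> RTR -> RTS
         * sequence. A ready-to-reset request stops after the RESET step and
         * an error-to-ready request transitions without the port attribute.
         */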
99         if (type != MLX5_TXQ_MOD_RST2RDY) {
100                 ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
101                 if (ret) {
102                         DRV_LOG(ERR, "Cannot change Tx QP state to RESET %s",
103                                 strerror(errno));
104                         rte_errno = errno;
105                         return ret;
106                 }
107                 if (type == MLX5_TXQ_MOD_RDY2RST)
108                         return 0;
109         }
110         if (type == MLX5_TXQ_MOD_ERR2RDY)
111                 attr_mask = IBV_QP_STATE;
112         mod.qp_state = IBV_QPS_INIT;
113         ret = mlx5_glue->modify_qp(obj->qp, &mod, attr_mask);
114         if (ret) {
115                 DRV_LOG(ERR, "Cannot change Tx QP state to INIT %s",
116                         strerror(errno));
117                 rte_errno = errno;
118                 return ret;
119         }
120         mod.qp_state = IBV_QPS_RTR;
121         ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
122         if (ret) {
123                 DRV_LOG(ERR, "Cannot change Tx QP state to RTR %s",
124                         strerror(errno));
125                 rte_errno = errno;
126                 return ret;
127         }
128         mod.qp_state = IBV_QPS_RTS;
129         ret = mlx5_glue->modify_qp(obj->qp, &mod, IBV_QP_STATE);
130         if (ret) {
131                 DRV_LOG(ERR, "Cannot change Tx QP state to RTS %s",
132                         strerror(errno));
133                 rte_errno = errno;
134                 return ret;
135         }
136         return 0;
137 }
138
139 /**
140  * Create a CQ Verbs object.
141  *
142  * @param rxq
143  *   Pointer to Rx queue.
144  *
145  * @return
146  *   The Verbs CQ object initialized, NULL otherwise and rte_errno is set.
147  */
148 static struct ibv_cq *
149 mlx5_rxq_ibv_cq_create(struct mlx5_rxq_priv *rxq)
150 {
151         struct mlx5_priv *priv = rxq->priv;
152         struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
153         struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
154         struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
155         unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
156         struct {
157                 struct ibv_cq_init_attr_ex ibv;
158                 struct mlx5dv_cq_init_attr mlx5;
159         } cq_attr;
160
161         cq_attr.ibv = (struct ibv_cq_init_attr_ex){
162                 .cqe = cqe_n,
163                 .channel = rxq_obj->ibv_channel,
164                 .comp_mask = 0,
165         };
166         cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
167                 .comp_mask = 0,
168         };
169         if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
170                 cq_attr.mlx5.comp_mask |=
171                                 MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
172                 rxq_data->byte_mask = UINT32_MAX;
173 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
174                 if (mlx5_rxq_mprq_enabled(rxq_data)) {
175                         cq_attr.mlx5.cqe_comp_res_format =
176                                         MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX;
177                         rxq_data->mcqe_format =
178                                         MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
179                 } else {
180                         cq_attr.mlx5.cqe_comp_res_format =
181                                         MLX5DV_CQE_RES_FORMAT_HASH;
182                         rxq_data->mcqe_format =
183                                         MLX5_CQE_RESP_FORMAT_HASH;
184                 }
185 #else
186                 cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
187                 rxq_data->mcqe_format = MLX5_CQE_RESP_FORMAT_HASH;
188 #endif
                /*
                 * For vectorized Rx, the CQE count must not be doubled so
                 * that cq_ci and rq_ci stay aligned; double it only when the
                 * vectorized path is not supported.
                 */
193                 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
194                         cq_attr.ibv.cqe *= 2;
195         } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
196                 DRV_LOG(DEBUG,
197                         "Port %u Rx CQE compression is disabled for HW"
198                         " timestamp.",
199                         priv->dev_data->port_id);
200         }
201 #ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
202         if (RTE_CACHE_LINE_SIZE == 128) {
203                 cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
204                 cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
205         }
206 #endif
207         return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq
208                                                            (priv->sh->cdev->ctx,
209                                                             &cq_attr.ibv,
210                                                             &cq_attr.mlx5));
211 }
212
213 /**
214  * Create a WQ Verbs object.
215  *
216  * @param rxq
217  *   Pointer to Rx queue.
218  *
219  * @return
220  *   The Verbs WQ object initialized, NULL otherwise and rte_errno is set.
221  */
222 static struct ibv_wq *
223 mlx5_rxq_ibv_wq_create(struct mlx5_rxq_priv *rxq)
224 {
225         struct mlx5_priv *priv = rxq->priv;
226         struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
227         struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
228         struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
229         unsigned int wqe_n = 1 << rxq_data->elts_n;
230         struct {
231                 struct ibv_wq_init_attr ibv;
232 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
233                 struct mlx5dv_wq_init_attr mlx5;
234 #endif
235         } wq_attr;
236
237         wq_attr.ibv = (struct ibv_wq_init_attr){
238                 .wq_context = NULL, /* Could be useful in the future. */
239                 .wq_type = IBV_WQT_RQ,
240                 /* Max number of outstanding WRs. */
241                 .max_wr = wqe_n >> rxq_data->sges_n,
242                 /* Max number of scatter/gather elements in a WR. */
243                 .max_sge = 1 << rxq_data->sges_n,
244                 .pd = priv->sh->cdev->pd,
245                 .cq = rxq_obj->ibv_cq,
246                 .comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
247                 .create_flags = (rxq_data->vlan_strip ?
248                                  IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
249         };
250         /* By default, FCS (CRC) is stripped by hardware. */
251         if (rxq_data->crc_present) {
252                 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
253                 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
254         }
255         if (priv->config.hw_padding) {
256 #if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
257                 wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
258                 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
259 #elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
260                 wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
261                 wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
262 #endif
263         }
264 #ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
265         wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
266                 .comp_mask = 0,
267         };
268         if (mlx5_rxq_mprq_enabled(rxq_data)) {
269                 struct mlx5dv_striding_rq_init_attr *mprq_attr =
270                                                 &wq_attr.mlx5.striding_rq_attrs;
271
272                 wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
273                 *mprq_attr = (struct mlx5dv_striding_rq_init_attr){
274                         .single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
275                         .single_wqe_log_num_of_strides = rxq_data->strd_num_n,
276                         .two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
277                 };
278         }
279         rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->cdev->ctx, &wq_attr.ibv,
280                                               &wq_attr.mlx5);
281 #else
282         rxq_obj->wq = mlx5_glue->create_wq(priv->sh->cdev->ctx, &wq_attr.ibv);
283 #endif
284         if (rxq_obj->wq) {
                /*
                 * Make sure the number of WRs*SGEs matches expectations since
                 * a queue cannot allocate more than "desc" buffers.
                 */
289                 if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
290                     wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
291                         DRV_LOG(ERR,
292                                 "Port %u Rx queue %u requested %u*%u but got"
293                                 " %u*%u WRs*SGEs.",
294                                 priv->dev_data->port_id, rxq->idx,
295                                 wqe_n >> rxq_data->sges_n,
296                                 (1 << rxq_data->sges_n),
297                                 wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
298                         claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
299                         rxq_obj->wq = NULL;
300                         rte_errno = EINVAL;
301                 }
302         }
303         return rxq_obj->wq;
304 }
305
306 /**
307  * Create the Rx queue Verbs object.
308  *
309  * @param rxq
310  *   Pointer to Rx queue.
311  *
312  * @return
313  *   0 on success, a negative errno value otherwise and rte_errno is set.
314  */
315 static int
316 mlx5_rxq_ibv_obj_new(struct mlx5_rxq_priv *rxq)
317 {
318         uint16_t idx = rxq->idx;
319         struct mlx5_priv *priv = rxq->priv;
320         uint16_t port_id = priv->dev_data->port_id;
321         struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
322         struct mlx5_rxq_data *rxq_data = &rxq_ctrl->rxq;
323         struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
324         struct mlx5dv_cq cq_info;
325         struct mlx5dv_rwq rwq;
326         int ret = 0;
327         struct mlx5dv_obj obj;
328
329         MLX5_ASSERT(rxq_data);
330         MLX5_ASSERT(tmpl);
331         tmpl->rxq_ctrl = rxq_ctrl;
332         if (rxq_ctrl->irq) {
333                 tmpl->ibv_channel =
334                         mlx5_glue->create_comp_channel(priv->sh->cdev->ctx);
335                 if (!tmpl->ibv_channel) {
336                         DRV_LOG(ERR, "Port %u: comp channel creation failure.",
337                                 port_id);
338                         rte_errno = ENOMEM;
339                         goto error;
340                 }
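                /* Save the completion channel fd for Rx interrupt handling. */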
341                 tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
342         }
343         /* Create CQ using Verbs API. */
344         tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(rxq);
345         if (!tmpl->ibv_cq) {
346                 DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
347                         port_id, idx);
348                 rte_errno = ENOMEM;
349                 goto error;
350         }
351         obj.cq.in = tmpl->ibv_cq;
352         obj.cq.out = &cq_info;
353         ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
354         if (ret) {
355                 rte_errno = ret;
356                 goto error;
357         }
358         if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
359                 DRV_LOG(ERR,
360                         "Port %u wrong MLX5_CQE_SIZE environment "
361                         "variable value: it should be set to %u.",
362                         port_id, RTE_CACHE_LINE_SIZE);
363                 rte_errno = EINVAL;
364                 goto error;
365         }
366         /* Fill the rings. */
367         rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
368         rxq_data->cq_db = cq_info.dbrec;
369         rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
370         rxq_data->cq_uar = cq_info.cq_uar;
371         rxq_data->cqn = cq_info.cqn;
372         /* Create WQ (RQ) using Verbs API. */
373         tmpl->wq = mlx5_rxq_ibv_wq_create(rxq);
374         if (!tmpl->wq) {
375                 DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
376                         port_id, idx);
377                 rte_errno = ENOMEM;
378                 goto error;
379         }
380         /* Change queue state to ready. */
381         ret = mlx5_ibv_modify_wq(rxq, IBV_WQS_RDY);
382         if (ret) {
383                 DRV_LOG(ERR,
384                         "Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
385                         port_id, idx);
386                 rte_errno = ret;
387                 goto error;
388         }
389         obj.rwq.in = tmpl->wq;
390         obj.rwq.out = &rwq;
391         ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
392         if (ret) {
393                 rte_errno = ret;
394                 goto error;
395         }
396         rxq_data->wqes = rwq.buf;
397         rxq_data->rq_db = rwq.dbrec;
398         rxq_data->cq_arm_sn = 0;
399         mlx5_rxq_initialize(rxq_data);
400         rxq_data->cq_ci = 0;
401         priv->dev_data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
402         rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
403         return 0;
404 error:
405         ret = rte_errno; /* Save rte_errno before cleanup. */
406         if (tmpl->wq)
407                 claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
408         if (tmpl->ibv_cq)
409                 claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
410         if (tmpl->ibv_channel)
411                 claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
412         rte_errno = ret; /* Restore rte_errno. */
413         return -rte_errno;
414 }
415
416 /**
417  * Release an Rx verbs queue object.
418  *
419  * @param rxq
420  *   Pointer to Rx queue.
421  */
422 static void
423 mlx5_rxq_ibv_obj_release(struct mlx5_rxq_priv *rxq)
424 {
425         struct mlx5_rxq_obj *rxq_obj = rxq->ctrl->obj;
426
427         MLX5_ASSERT(rxq_obj);
428         MLX5_ASSERT(rxq_obj->wq);
429         MLX5_ASSERT(rxq_obj->ibv_cq);
430         claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
431         claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
432         if (rxq_obj->ibv_channel)
433                 claim_zero(mlx5_glue->destroy_comp_channel
434                                                         (rxq_obj->ibv_channel));
435 }
436
437 /**
438  * Get event for an Rx verbs queue object.
439  *
440  * @param rxq_obj
441  *   Verbs Rx queue object.
442  *
443  * @return
444  *   0 on success, a negative errno value otherwise and rte_errno is set.
445  */
446 static int
447 mlx5_rx_ibv_get_event(struct mlx5_rxq_obj *rxq_obj)
448 {
449         struct ibv_cq *ev_cq;
450         void *ev_ctx;
451         int ret = mlx5_glue->get_cq_event(rxq_obj->ibv_channel,
452                                           &ev_cq, &ev_ctx);
453
454         if (ret < 0 || ev_cq != rxq_obj->ibv_cq)
455                 goto exit;
456         mlx5_glue->ack_cq_events(rxq_obj->ibv_cq, 1);
457         return 0;
458 exit:
459         if (ret < 0)
460                 rte_errno = errno;
461         else
462                 rte_errno = EINVAL;
463         return -rte_errno;
464 }
465
466 /**
 * Creates a list of receive work queues as a field of the indirection table.
468  *
469  * @param dev
470  *   Pointer to Ethernet device.
471  * @param log_n
472  *   Log of number of queues in the array.
473  * @param ind_tbl
474  *   Verbs indirection table object.
475  *
476  * @return
477  *   0 on success, a negative errno value otherwise and rte_errno is set.
478  */
479 static int
480 mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
481                        struct mlx5_ind_table_obj *ind_tbl)
482 {
483         struct mlx5_priv *priv = dev->data->dev_private;
484         struct ibv_wq *wq[1 << log_n];
485         unsigned int i, j;
486
487         MLX5_ASSERT(ind_tbl);
488         for (i = 0; i != ind_tbl->queues_n; ++i) {
489                 struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev,
490                                                          ind_tbl->queues[i]);
491
492                 wq[i] = rxq->ctrl->obj->wq;
493         }
494         MLX5_ASSERT(i > 0);
495         /* Finalise indirection table. */
496         for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
497                 wq[i] = wq[j];
498         ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table
499                                         (priv->sh->cdev->ctx,
500                                          &(struct ibv_rwq_ind_table_init_attr){
501                                                  .log_ind_tbl_size = log_n,
502                                                  .ind_tbl = wq,
503                                                  .comp_mask = 0,
504                                          });
505         if (!ind_tbl->ind_table) {
506                 rte_errno = errno;
507                 return -rte_errno;
508         }
509         return 0;
510 }
511
512 /**
513  * Destroys the specified Indirection Table.
514  *
515  * @param ind_table
516  *   Indirection table to release.
517  */
518 static void
519 mlx5_ibv_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
520 {
521         claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
522 }
523
524 /**
525  * Create an Rx Hash queue.
526  *
527  * @param dev
528  *   Pointer to Ethernet device.
529  * @param hrxq
530  *   Pointer to Rx Hash queue.
531  * @param tunnel
532  *   Tunnel type.
533  *
534  * @return
535  *   0 on success, a negative errno value otherwise and rte_errno is set.
536  */
537 static int
538 mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
539                   int tunnel __rte_unused)
540 {
541         struct mlx5_priv *priv = dev->data->dev_private;
542         struct ibv_qp *qp = NULL;
543         struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
544         const uint8_t *rss_key = hrxq->rss_key;
545         uint64_t hash_fields = hrxq->hash_fields;
546         int err;
547 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
548         struct mlx5dv_qp_init_attr qp_init_attr;
549
550         memset(&qp_init_attr, 0, sizeof(qp_init_attr));
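        /* Request HW tunnel offloads for hash queues serving tunneled flows. */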
551         if (tunnel) {
552                 qp_init_attr.comp_mask =
553                                        MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
554                 qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
555         }
556 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
557         if (dev->data->dev_conf.lpbk_mode) {
                /* Allow packets sent by the NIC to loop back w/o source MAC check. */
559                 qp_init_attr.comp_mask |=
560                                 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
561                 qp_init_attr.create_flags |=
562                                 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
563         }
564 #endif
565         qp = mlx5_glue->dv_create_qp
566                         (priv->sh->cdev->ctx,
567                          &(struct ibv_qp_init_attr_ex){
568                                 .qp_type = IBV_QPT_RAW_PACKET,
569                                 .comp_mask =
570                                         IBV_QP_INIT_ATTR_PD |
571                                         IBV_QP_INIT_ATTR_IND_TABLE |
572                                         IBV_QP_INIT_ATTR_RX_HASH,
573                                 .rx_hash_conf = (struct ibv_rx_hash_conf){
574                                         .rx_hash_function =
575                                                 IBV_RX_HASH_FUNC_TOEPLITZ,
576                                         .rx_hash_key_len = hrxq->rss_key_len,
577                                         .rx_hash_key =
578                                                 (void *)(uintptr_t)rss_key,
579                                         .rx_hash_fields_mask = hash_fields,
580                                 },
581                                 .rwq_ind_tbl = ind_tbl->ind_table,
582                                 .pd = priv->sh->cdev->pd,
583                           },
584                           &qp_init_attr);
585 #else
586         qp = mlx5_glue->create_qp_ex
587                         (priv->sh->cdev->ctx,
588                          &(struct ibv_qp_init_attr_ex){
589                                 .qp_type = IBV_QPT_RAW_PACKET,
590                                 .comp_mask =
591                                         IBV_QP_INIT_ATTR_PD |
592                                         IBV_QP_INIT_ATTR_IND_TABLE |
593                                         IBV_QP_INIT_ATTR_RX_HASH,
594                                 .rx_hash_conf = (struct ibv_rx_hash_conf){
595                                         .rx_hash_function =
596                                                 IBV_RX_HASH_FUNC_TOEPLITZ,
597                                         .rx_hash_key_len = hrxq->rss_key_len,
598                                         .rx_hash_key =
599                                                 (void *)(uintptr_t)rss_key,
600                                         .rx_hash_fields_mask = hash_fields,
601                                 },
602                                 .rwq_ind_tbl = ind_tbl->ind_table,
603                                 .pd = priv->sh->cdev->pd,
604                          });
605 #endif
606         if (!qp) {
607                 rte_errno = errno;
608                 goto error;
609         }
610         hrxq->qp = qp;
611 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
612         hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
613         if (!hrxq->action) {
614                 rte_errno = errno;
615                 goto error;
616         }
617 #endif
618         return 0;
619 error:
620         err = rte_errno; /* Save rte_errno before cleanup. */
621         if (qp)
622                 claim_zero(mlx5_glue->destroy_qp(qp));
623         rte_errno = err; /* Restore rte_errno. */
624         return -rte_errno;
625 }
626
627 /**
628  * Destroy a Verbs queue pair.
629  *
630  * @param hrxq
631  *   Hash Rx queue to release its qp.
632  */
633 static void
634 mlx5_ibv_qp_destroy(struct mlx5_hrxq *hrxq)
635 {
636         claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
637 }
638
639 /**
640  * Release a drop Rx queue Verbs object.
641  *
642  * @param dev
643  *   Pointer to Ethernet device.
644  */
645 static void
646 mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)
647 {
648         struct mlx5_priv *priv = dev->data->dev_private;
649         struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
650         struct mlx5_rxq_obj *rxq_obj;
651
652         if (rxq == NULL)
653                 return;
654         if (rxq->ctrl == NULL)
655                 goto free_priv;
656         rxq_obj = rxq->ctrl->obj;
657         if (rxq_obj == NULL)
658                 goto free_ctrl;
659         if (rxq_obj->wq)
660                 claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
661         if (rxq_obj->ibv_cq)
662                 claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
663         mlx5_free(rxq_obj);
664 free_ctrl:
665         mlx5_free(rxq->ctrl);
666 free_priv:
667         mlx5_free(rxq);
668         priv->drop_queue.rxq = NULL;
669 }
670
671 /**
672  * Create a drop Rx queue Verbs object.
673  *
674  * @param dev
675  *   Pointer to Ethernet device.
676  *
677  * @return
678  *   0 on success, a negative errno value otherwise and rte_errno is set.
679  */
680 static int
681 mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
682 {
683         struct mlx5_priv *priv = dev->data->dev_private;
684         struct ibv_context *ctx = priv->sh->cdev->ctx;
685         struct mlx5_rxq_priv *rxq = priv->drop_queue.rxq;
686         struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
687         struct mlx5_rxq_obj *rxq_obj = NULL;
688
689         if (rxq != NULL)
690                 return 0;
691         rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
692         if (rxq == NULL) {
693                 DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
694                       dev->data->port_id);
695                 rte_errno = ENOMEM;
696                 return -rte_errno;
697         }
698         priv->drop_queue.rxq = rxq;
699         rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl), 0,
700                                SOCKET_ID_ANY);
701         if (rxq_ctrl == NULL) {
702                 DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue control memory.",
703                       dev->data->port_id);
704                 rte_errno = ENOMEM;
705                 goto error;
706         }
707         rxq->ctrl = rxq_ctrl;
708         rxq_obj = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_obj), 0,
709                               SOCKET_ID_ANY);
710         if (rxq_obj == NULL) {
711                 DRV_LOG(DEBUG, "Port %u cannot allocate drop Rx queue memory.",
712                       dev->data->port_id);
713                 rte_errno = ENOMEM;
714                 goto error;
715         }
716         rxq_ctrl->obj = rxq_obj;
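        /*
         * Minimal CQ and WQ resources are enough: no buffers are ever posted,
         * so packets steered to the drop queue are discarded.
         */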
717         rxq_obj->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
718         if (!rxq_obj->ibv_cq) {
719                 DRV_LOG(DEBUG, "Port %u cannot allocate CQ for drop queue.",
720                       dev->data->port_id);
721                 rte_errno = errno;
722                 goto error;
723         }
724         rxq_obj->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
725                                                     .wq_type = IBV_WQT_RQ,
726                                                     .max_wr = 1,
727                                                     .max_sge = 1,
728                                                     .pd = priv->sh->cdev->pd,
729                                                     .cq = rxq_obj->ibv_cq,
730                                               });
731         if (!rxq_obj->wq) {
732                 DRV_LOG(DEBUG, "Port %u cannot allocate WQ for drop queue.",
733                       dev->data->port_id);
734                 rte_errno = errno;
735                 goto error;
736         }
737         return 0;
738 error:
739         mlx5_rxq_ibv_obj_drop_release(dev);
740         return -rte_errno;
741 }
742
743 /**
744  * Create a Verbs drop action for Rx Hash queue.
745  *
746  * @param dev
747  *   Pointer to Ethernet device.
748  *
749  * @return
750  *   0 on success, a negative errno value otherwise and rte_errno is set.
751  */
752 static int
753 mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
754 {
755         struct mlx5_priv *priv = dev->data->dev_private;
756         struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
757         struct ibv_rwq_ind_table *ind_tbl = NULL;
758         struct mlx5_rxq_obj *rxq;
759         int ret;
760
761         MLX5_ASSERT(hrxq && hrxq->ind_table);
762         ret = mlx5_rxq_ibv_obj_drop_create(dev);
763         if (ret < 0)
764                 goto error;
765         rxq = priv->drop_queue.rxq->ctrl->obj;
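        /* Create a single-entry indirection table pointing at the drop WQ. */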
766         ind_tbl = mlx5_glue->create_rwq_ind_table
767                                 (priv->sh->cdev->ctx,
768                                  &(struct ibv_rwq_ind_table_init_attr){
769                                         .log_ind_tbl_size = 0,
770                                         .ind_tbl = (struct ibv_wq **)&rxq->wq,
771                                         .comp_mask = 0,
772                                  });
773         if (!ind_tbl) {
774                 DRV_LOG(DEBUG, "Port %u"
775                         " cannot allocate indirection table for drop queue.",
776                         dev->data->port_id);
777                 rte_errno = errno;
778                 goto error;
779         }
780         hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx,
781                  &(struct ibv_qp_init_attr_ex){
782                         .qp_type = IBV_QPT_RAW_PACKET,
783                         .comp_mask = IBV_QP_INIT_ATTR_PD |
784                                      IBV_QP_INIT_ATTR_IND_TABLE |
785                                      IBV_QP_INIT_ATTR_RX_HASH,
786                         .rx_hash_conf = (struct ibv_rx_hash_conf){
787                                 .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
788                                 .rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
789                                 .rx_hash_key = rss_hash_default_key,
790                                 .rx_hash_fields_mask = 0,
791                                 },
792                         .rwq_ind_tbl = ind_tbl,
793                         .pd = priv->sh->cdev->pd
794                  });
795         if (!hrxq->qp) {
796                 DRV_LOG(DEBUG, "Port %u cannot allocate QP for drop queue.",
797                       dev->data->port_id);
798                 rte_errno = errno;
799                 goto error;
800         }
801 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
802         hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
803         if (!hrxq->action) {
804                 rte_errno = errno;
805                 goto error;
806         }
807 #endif
808         hrxq->ind_table->ind_table = ind_tbl;
809         return 0;
810 error:
811         if (hrxq->qp)
812                 claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
813         if (ind_tbl)
814                 claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
815         if (priv->drop_queue.rxq)
816                 mlx5_rxq_ibv_obj_drop_release(dev);
817         return -rte_errno;
818 }
819
820 /**
821  * Release a drop hash Rx queue.
822  *
823  * @param dev
824  *   Pointer to Ethernet device.
825  */
826 static void
827 mlx5_ibv_drop_action_destroy(struct rte_eth_dev *dev)
828 {
829         struct mlx5_priv *priv = dev->data->dev_private;
830         struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
831         struct ibv_rwq_ind_table *ind_tbl = hrxq->ind_table->ind_table;
832
833 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
834         claim_zero(mlx5_glue->destroy_flow_action(hrxq->action));
835 #endif
836         claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
837         claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
838         mlx5_rxq_ibv_obj_drop_release(dev);
839 }
840
841 /**
842  * Create a QP Verbs object.
843  *
844  * @param dev
845  *   Pointer to Ethernet device.
846  * @param idx
847  *   Queue index in DPDK Tx queue array.
848  *
849  * @return
850  *   The QP Verbs object, NULL otherwise and rte_errno is set.
851  */
852 static struct ibv_qp *
853 mlx5_txq_ibv_qp_create(struct rte_eth_dev *dev, uint16_t idx)
854 {
855         struct mlx5_priv *priv = dev->data->dev_private;
856         struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
857         struct mlx5_txq_ctrl *txq_ctrl =
858                         container_of(txq_data, struct mlx5_txq_ctrl, txq);
859         struct ibv_qp *qp_obj = NULL;
860         struct ibv_qp_init_attr_ex qp_attr = { 0 };
861         const int desc = 1 << txq_data->elts_n;
862
863         MLX5_ASSERT(txq_ctrl->obj->cq);
864         /* CQ to be associated with the send queue. */
865         qp_attr.send_cq = txq_ctrl->obj->cq;
866         /* CQ to be associated with the receive queue. */
867         qp_attr.recv_cq = txq_ctrl->obj->cq;
868         /* Max number of outstanding WRs. */
869         qp_attr.cap.max_send_wr = ((priv->sh->device_attr.max_qp_wr < desc) ?
870                                    priv->sh->device_attr.max_qp_wr : desc);
        /*
         * Max number of scatter/gather elements in a WR, must be 1 to prevent
         * libmlx5 from trying to affect too much memory. TX gather is not
         * impacted by the device_attr.max_sge limit and will still work
         * properly.
         */
877         qp_attr.cap.max_send_sge = 1;
        qp_attr.qp_type = IBV_QPT_RAW_PACKET;
        /* Do *NOT* enable this, completion events are managed per Tx burst. */
        qp_attr.sq_sig_all = 0;
881         qp_attr.pd = priv->sh->cdev->pd;
882         qp_attr.comp_mask = IBV_QP_INIT_ATTR_PD;
883         if (txq_data->inlen_send)
884                 qp_attr.cap.max_inline_data = txq_ctrl->max_inline_data;
885         if (txq_data->tso_en) {
886                 qp_attr.max_tso_header = txq_ctrl->max_tso_header;
887                 qp_attr.comp_mask |= IBV_QP_INIT_ATTR_MAX_TSO_HEADER;
888         }
889         qp_obj = mlx5_glue->create_qp_ex(priv->sh->cdev->ctx, &qp_attr);
890         if (qp_obj == NULL) {
891                 DRV_LOG(ERR, "Port %u Tx queue %u QP creation failure.",
892                         dev->data->port_id, idx);
893                 rte_errno = errno;
894         }
895         return qp_obj;
896 }
897
898 /**
899  * Create the Tx queue Verbs object.
900  *
901  * @param dev
902  *   Pointer to Ethernet device.
903  * @param idx
904  *   Queue index in DPDK Tx queue array.
905  *
906  * @return
907  *   0 on success, a negative errno value otherwise and rte_errno is set.
908  */
909 int
910 mlx5_txq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
911 {
912         struct mlx5_priv *priv = dev->data->dev_private;
913         struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
914         struct mlx5_txq_ctrl *txq_ctrl =
915                 container_of(txq_data, struct mlx5_txq_ctrl, txq);
916         struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
917         unsigned int cqe_n;
918         struct mlx5dv_qp qp;
919         struct mlx5dv_cq cq_info;
920         struct mlx5dv_obj obj;
921         const int desc = 1 << txq_data->elts_n;
922         int ret = 0;
923
924         MLX5_ASSERT(txq_data);
925         MLX5_ASSERT(txq_obj);
926         txq_obj->txq_ctrl = txq_ctrl;
927         if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) {
928                 DRV_LOG(ERR, "Port %u MLX5_ENABLE_CQE_COMPRESSION "
929                         "must never be set.", dev->data->port_id);
930                 rte_errno = EINVAL;
931                 return -rte_errno;
932         }
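        /*
         * Size the CQ for the completions that can be requested: one per
         * completion request batch plus extra headroom.
         */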
933         cqe_n = desc / MLX5_TX_COMP_THRESH +
934                 1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
935         txq_obj->cq = mlx5_glue->create_cq(priv->sh->cdev->ctx, cqe_n,
936                                            NULL, NULL, 0);
937         if (txq_obj->cq == NULL) {
938                 DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
939                         dev->data->port_id, idx);
940                 rte_errno = errno;
941                 goto error;
942         }
943         txq_obj->qp = mlx5_txq_ibv_qp_create(dev, idx);
944         if (txq_obj->qp == NULL) {
945                 rte_errno = errno;
946                 goto error;
947         }
948         ret = mlx5_ibv_modify_qp(txq_obj, MLX5_TXQ_MOD_RST2RDY,
949                                  (uint8_t)priv->dev_port);
950         if (ret) {
951                 DRV_LOG(ERR, "Port %u Tx queue %u QP state modifying failed.",
952                         dev->data->port_id, idx);
953                 rte_errno = errno;
954                 goto error;
955         }
956         qp.comp_mask = MLX5DV_QP_MASK_UAR_MMAP_OFFSET;
957 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
958         /* If using DevX, need additional mask to read tisn value. */
959         if (priv->sh->devx && !priv->sh->tdn)
960                 qp.comp_mask |= MLX5DV_QP_MASK_RAW_QP_HANDLES;
961 #endif
962         obj.cq.in = txq_obj->cq;
963         obj.cq.out = &cq_info;
964         obj.qp.in = txq_obj->qp;
965         obj.qp.out = &qp;
966         ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP);
967         if (ret != 0) {
968                 rte_errno = errno;
969                 goto error;
970         }
971         if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
972                 DRV_LOG(ERR,
973                         "Port %u wrong MLX5_CQE_SIZE environment variable"
974                         " value: it should be set to %u.",
975                         dev->data->port_id, RTE_CACHE_LINE_SIZE);
976                 rte_errno = EINVAL;
977                 goto error;
978         }
979         txq_data->cqe_n = log2above(cq_info.cqe_cnt);
980         txq_data->cqe_s = 1 << txq_data->cqe_n;
981         txq_data->cqe_m = txq_data->cqe_s - 1;
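        /* QP number pre-shifted by 8 bits as used in WQE control segments. */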
982         txq_data->qp_num_8s = ((struct ibv_qp *)txq_obj->qp)->qp_num << 8;
983         txq_data->wqes = qp.sq.buf;
984         txq_data->wqe_n = log2above(qp.sq.wqe_cnt);
985         txq_data->wqe_s = 1 << txq_data->wqe_n;
986         txq_data->wqe_m = txq_data->wqe_s - 1;
987         txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
988         txq_data->qp_db = &qp.dbrec[MLX5_SND_DBR];
989         txq_data->cq_db = cq_info.dbrec;
990         txq_data->cqes = (volatile struct mlx5_cqe *)cq_info.buf;
991         txq_data->cq_ci = 0;
992         txq_data->cq_pi = 0;
993         txq_data->wqe_ci = 0;
994         txq_data->wqe_pi = 0;
995         txq_data->wqe_comp = 0;
996         txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
997 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
        /*
         * If using DevX, the TIS transport domain value needs to be queried
         * and stored once per port. It will be used on Rx when creating the
         * matching TIR.
         */
1003         if (priv->sh->devx && !priv->sh->tdn) {
1004                 ret = mlx5_devx_cmd_qp_query_tis_td(txq_obj->qp, qp.tisn,
1005                                                     &priv->sh->tdn);
1006                 if (ret) {
                        DRV_LOG(ERR, "Failed to query port %u Tx queue %u QP TIS "
1008                                 "transport domain.", dev->data->port_id, idx);
1009                         rte_errno = EINVAL;
1010                         goto error;
1011                 } else {
1012                         DRV_LOG(DEBUG, "Port %u Tx queue %u TIS number %d "
1013                                 "transport domain %d.", dev->data->port_id,
1014                                 idx, qp.tisn, priv->sh->tdn);
1015                 }
1016         }
1017 #endif
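        /* BlueFlame/UAR register used for doorbell writes on the Tx data path. */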
1018         txq_ctrl->bf_reg = qp.bf.reg;
1019         if (qp.comp_mask & MLX5DV_QP_MASK_UAR_MMAP_OFFSET) {
1020                 txq_ctrl->uar_mmap_offset = qp.uar_mmap_offset;
1021                 DRV_LOG(DEBUG, "Port %u: uar_mmap_offset 0x%" PRIx64 ".",
1022                         dev->data->port_id, txq_ctrl->uar_mmap_offset);
1023         } else {
1024                 DRV_LOG(ERR,
1025                         "Port %u failed to retrieve UAR info, invalid"
1026                         " libmlx5.so",
1027                         dev->data->port_id);
1028                 rte_errno = EINVAL;
1029                 goto error;
1030         }
1031         txq_uar_init(txq_ctrl);
1032         dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
1033         return 0;
1034 error:
1035         ret = rte_errno; /* Save rte_errno before cleanup. */
1036         if (txq_obj->cq)
1037                 claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
1038         if (txq_obj->qp)
1039                 claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
1040         rte_errno = ret; /* Restore rte_errno. */
1041         return -rte_errno;
1042 }
1043
1044 /*
1045  * Create the dummy QP with minimal resources for loopback.
1046  *
1047  * @param dev
1048  *   Pointer to Ethernet device.
1049  *
1050  * @return
1051  *   0 on success, a negative errno value otherwise and rte_errno is set.
1052  */
1053 int
1054 mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev)
1055 {
1056 #if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
1057         struct mlx5_priv *priv = dev->data->dev_private;
1058         struct mlx5_dev_ctx_shared *sh = priv->sh;
1059         struct ibv_context *ctx = sh->cdev->ctx;
1060         struct mlx5dv_qp_init_attr qp_init_attr = {0};
1061         struct {
1062                 struct ibv_cq_init_attr_ex ibv;
1063                 struct mlx5dv_cq_init_attr mlx5;
1064         } cq_attr = {{0}};
1065
1066         if (dev->data->dev_conf.lpbk_mode) {
                /* Allow packets sent by the NIC to loop back w/o source MAC check. */
1068                 qp_init_attr.comp_mask |=
1069                                 MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
1070                 qp_init_attr.create_flags |=
1071                                 MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
1072         } else {
1073                 return 0;
1074         }
1075         /* Only need to check refcnt, 0 after "sh" is allocated. */
1076         if (!!(__atomic_fetch_add(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
1077                 MLX5_ASSERT(sh->self_lb.ibv_cq && sh->self_lb.qp);
1078                 priv->lb_used = 1;
1079                 return 0;
1080         }
1081         cq_attr.ibv = (struct ibv_cq_init_attr_ex){
1082                 .cqe = 1,
1083                 .channel = NULL,
1084                 .comp_mask = 0,
1085         };
1086         cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
1087                 .comp_mask = 0,
1088         };
1089         /* Only CQ is needed, no WQ(RQ) is required in this case. */
1090         sh->self_lb.ibv_cq = mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(ctx,
1091                                                         &cq_attr.ibv,
1092                                                         &cq_attr.mlx5));
1093         if (!sh->self_lb.ibv_cq) {
1094                 DRV_LOG(ERR, "Port %u cannot allocate CQ for loopback.",
1095                         dev->data->port_id);
1096                 rte_errno = errno;
1097                 goto error;
1098         }
1099         sh->self_lb.qp = mlx5_glue->dv_create_qp(ctx,
1100                                 &(struct ibv_qp_init_attr_ex){
1101                                         .qp_type = IBV_QPT_RAW_PACKET,
1102                                         .comp_mask = IBV_QP_INIT_ATTR_PD,
1103                                         .pd = sh->cdev->pd,
1104                                         .send_cq = sh->self_lb.ibv_cq,
1105                                         .recv_cq = sh->self_lb.ibv_cq,
1106                                         .cap.max_recv_wr = 1,
1107                                 },
1108                                 &qp_init_attr);
1109         if (!sh->self_lb.qp) {
1110                 DRV_LOG(DEBUG, "Port %u cannot allocate QP for loopback.",
1111                         dev->data->port_id);
1112                 rte_errno = errno;
1113                 goto error;
1114         }
1115         priv->lb_used = 1;
1116         return 0;
1117 error:
1118         if (sh->self_lb.ibv_cq) {
1119                 claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
1120                 sh->self_lb.ibv_cq = NULL;
1121         }
1122         (void)__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED);
1123         return -rte_errno;
1124 #else
1125         RTE_SET_USED(dev);
1126         return 0;
1127 #endif
1128 }
1129
1130 /*
1131  * Release the dummy queue resources for loopback.
1132  *
1133  * @param dev
1134  *   Pointer to Ethernet device.
1135  */
1136 void
1137 mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev)
1138 {
1139 #if defined(HAVE_IBV_DEVICE_TUNNEL_SUPPORT) && defined(HAVE_IBV_FLOW_DV_SUPPORT)
1140         struct mlx5_priv *priv = dev->data->dev_private;
1141         struct mlx5_dev_ctx_shared *sh = priv->sh;
1142
1143         if (!priv->lb_used)
1144                 return;
1145         MLX5_ASSERT(__atomic_load_n(&sh->self_lb.refcnt, __ATOMIC_RELAXED));
1146         if (!(__atomic_sub_fetch(&sh->self_lb.refcnt, 1, __ATOMIC_RELAXED))) {
1147                 if (sh->self_lb.qp) {
1148                         claim_zero(mlx5_glue->destroy_qp(sh->self_lb.qp));
1149                         sh->self_lb.qp = NULL;
1150                 }
1151                 if (sh->self_lb.ibv_cq) {
1152                         claim_zero(mlx5_glue->destroy_cq(sh->self_lb.ibv_cq));
1153                         sh->self_lb.ibv_cq = NULL;
1154                 }
1155         }
1156         priv->lb_used = 0;
1157 #else
1158         RTE_SET_USED(dev);
1159         return;
1160 #endif
1161 }
1162
/**
 * Release a Tx Verbs queue object.
 *
 * @param txq_obj
 *   Verbs Tx queue object.
 */
1168  */
1169 void
1170 mlx5_txq_ibv_obj_release(struct mlx5_txq_obj *txq_obj)
1171 {
1172         MLX5_ASSERT(txq_obj);
1173         claim_zero(mlx5_glue->destroy_qp(txq_obj->qp));
1174         claim_zero(mlx5_glue->destroy_cq(txq_obj->cq));
1175 }
1176
1177 struct mlx5_obj_ops ibv_obj_ops = {
1178         .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_wq_vlan_strip,
1179         .rxq_obj_new = mlx5_rxq_ibv_obj_new,
1180         .rxq_event_get = mlx5_rx_ibv_get_event,
1181         .rxq_obj_modify = mlx5_ibv_modify_wq,
1182         .rxq_obj_release = mlx5_rxq_ibv_obj_release,
1183         .ind_table_new = mlx5_ibv_ind_table_new,
1184         .ind_table_destroy = mlx5_ibv_ind_table_destroy,
1185         .hrxq_new = mlx5_ibv_hrxq_new,
1186         .hrxq_destroy = mlx5_ibv_qp_destroy,
1187         .drop_action_create = mlx5_ibv_drop_action_create,
1188         .drop_action_destroy = mlx5_ibv_drop_action_destroy,
1189         .txq_obj_new = mlx5_txq_ibv_obj_new,
1190         .txq_obj_modify = mlx5_ibv_modify_qp,
1191         .txq_obj_release = mlx5_txq_ibv_obj_release,
1192         .lb_dummy_queue_create = NULL,
1193         .lb_dummy_queue_release = NULL,
1194 };