net/mlx5: separate Rx interrupt handling
drivers/net/mlx5/mlx5_devx.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"

/**
 * Modify RQ vlan stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping offload, nonzero to enable.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
        struct mlx5_devx_modify_rq_attr rq_attr;

        memset(&rq_attr, 0, sizeof(rq_attr));
        rq_attr.rq_state = MLX5_RQC_STATE_RDY;
        rq_attr.state = MLX5_RQC_STATE_RDY;
        rq_attr.vsd = (on ? 0 : 1);
        rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
        return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        if (rxq_ctrl->rxq.wqes) {
                mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
                rxq_ctrl->rxq.wqes = NULL;
        }
        if (rxq_ctrl->wq_umem) {
                mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
                rxq_ctrl->wq_umem = NULL;
        }
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        if (rxq_ctrl->rxq.cqes) {
                rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
                rxq_ctrl->rxq.cqes = NULL;
        }
        if (rxq_ctrl->cq_umem) {
                mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
                rxq_ctrl->cq_umem = NULL;
        }
}

/**
 * Release Rx hairpin-related resources.
 *
 * @param rxq_obj
 *   Hairpin Rx queue object.
 */
static void
mlx5_rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
{
        struct mlx5_devx_modify_rq_attr rq_attr = { 0 };

        MLX5_ASSERT(rxq_obj);
        rq_attr.state = MLX5_RQC_STATE_RST;
        rq_attr.rq_state = MLX5_RQC_STATE_RDY;
        mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
        claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
        struct mlx5_priv *priv = rxq_obj->rxq_ctrl->priv;

        MLX5_ASSERT(rxq_obj);
        MLX5_ASSERT(rxq_obj->rq);
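        /* Hairpin queues own only an RQ; other DevX queues also own a CQ. */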
        if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
                mlx5_rxq_obj_hairpin_release(rxq_obj);
        } else {
                MLX5_ASSERT(rxq_obj->devx_cq);
                rxq_free_elts(rxq_obj->rxq_ctrl);
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
                claim_zero(mlx5_release_dbr(&priv->dbrpgs,
                                            rxq_obj->rxq_ctrl->rq_dbr_umem_id,
                                            rxq_obj->rxq_ctrl->rq_dbr_offset));
                claim_zero(mlx5_release_dbr(&priv->dbrpgs,
                                            rxq_obj->rxq_ctrl->cq_dbr_umem_id,
                                            rxq_obj->rxq_ctrl->cq_dbr_offset));
                if (rxq_obj->devx_channel)
                        mlx5_glue->devx_destroy_event_channel
                                                        (rxq_obj->devx_channel);
                rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
                rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
        }
        LIST_REMOVE(rxq_obj, next);
        mlx5_free(rxq_obj);
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
                                            &out.event_resp,
                                            sizeof(out.buf));

        if (ret < 0) {
                rte_errno = errno;
                return -rte_errno;
        }
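        /* The event is valid only if the cookie matches our CQ object. */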
        if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
        return 0;
#else
        (void)rxq_obj;
        rte_errno = ENOTSUP;
        return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
                              struct mlx5_devx_create_rq_attr *rq_attr)
{
        rq_attr->state = MLX5_RQC_STATE_RST;
        rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
        rq_attr->cqn = cqn;
        rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
                       struct mlx5_devx_wq_attr *wq_attr)
{
        wq_attr->end_padding_mode = priv->config.cqe_pad ?
                                        MLX5_WQ_END_PAD_MODE_ALIGN :
                                        MLX5_WQ_END_PAD_MODE_NONE;
        wq_attr->pd = priv->sh->pdn;
        wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
        wq_attr->dbr_umem_id = rxq_ctrl->rq_dbr_umem_id;
        wq_attr->dbr_umem_valid = 1;
        wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
        wq_attr->wq_umem_valid = 1;
}

/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param cqn
 *   CQ number to use with this RQ.
 *
 * @return
 *   The DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_devx_create_rq_attr rq_attr = { 0 };
        uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
        uint32_t wq_size = 0;
        uint32_t wqe_size = 0;
        uint32_t log_wqe_size = 0;
        void *buf = NULL;
        struct mlx5_devx_obj *rq;

        /* Fill RQ attributes. */
        rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
        rq_attr.flush_in_error_en = 1;
        mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
        /* Fill WQ attributes for this RQ. */
        if (mlx5_rxq_mprq_enabled(rxq_data)) {
                rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
                /*
                 * Number of strides in each WQE:
                 * 512*2^single_wqe_log_num_of_strides.
                 */
                rq_attr.wq_attr.single_wqe_log_num_of_strides =
                                rxq_data->strd_num_n -
                                MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
                /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
                rq_attr.wq_attr.single_stride_log_num_of_bytes =
                                rxq_data->strd_sz_n -
                                MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
                wqe_size = sizeof(struct mlx5_wqe_mprq);
        } else {
                rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
                wqe_size = sizeof(struct mlx5_wqe_data_seg);
        }
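        /* A WQE stride holds 2^sges_n data segments. */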
        log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
        rq_attr.wq_attr.log_wq_stride = log_wqe_size;
        rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
        /* Calculate and allocate WQ memory space. */
        wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
        wq_size = wqe_n * wqe_size;
        size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
        if (alignment == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get mem page size");
                rte_errno = ENOMEM;
                return NULL;
        }
        buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
                          alignment, rxq_ctrl->socket);
        if (!buf)
                return NULL;
        rxq_data->wqes = buf;
        rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
                                                     buf, wq_size, 0);
        if (!rxq_ctrl->wq_umem) {
                mlx5_free(buf);
                return NULL;
        }
        mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
        rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
        if (!rq)
                rxq_release_devx_rq_resources(rxq_ctrl);
        return rq;
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param cqe_n
 *   Number of CQEs in CQ.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param rxq_obj
 *   Pointer to Rx queue object data.
 *
 * @return
 *   The DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_devx_cq_new(struct rte_eth_dev *dev, unsigned int cqe_n, uint16_t idx,
                 struct mlx5_rxq_obj *rxq_obj)
{
        struct mlx5_devx_obj *cq_obj = NULL;
        struct mlx5_devx_cq_attr cq_attr = { 0 };
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        size_t page_size = rte_mem_page_size();
        uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
        uint32_t eqn = 0;
        void *buf = NULL;
        uint16_t event_nums[1] = {0};
        uint32_t log_cqe_n;
        uint32_t cq_size;
        int ret = 0;

        if (page_size == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get page_size.");
                goto error;
        }
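        /* CQE compression cannot be combined with HW timestamp or LRO. */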
        if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
            !rxq_data->lro) {
                cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
                cq_attr.mini_cqe_res_format =
                                mlx5_rxq_mprq_enabled(rxq_data) ?
                                MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
                                MLX5DV_CQE_RES_FORMAT_HASH;
#else
                cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
                /*
                 * For vectorized Rx, it must not be doubled in order to
                 * make cq_ci and rq_ci aligned.
                 */
                if (mlx5_rxq_check_vec_support(rxq_data) < 0)
                        cqe_n *= 2;
        } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
                DRV_LOG(DEBUG,
                        "Port %u Rx CQE compression is disabled for HW"
                        " timestamp.",
                        dev->data->port_id);
        } else if (priv->config.cqe_comp && rxq_data->lro) {
                DRV_LOG(DEBUG,
                        "Port %u Rx CQE compression is disabled for LRO.",
                        dev->data->port_id);
        }
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
        if (priv->config.cqe_pad)
                cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
#endif
        log_cqe_n = log2above(cqe_n);
        cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
        /* Query the EQN for this core. */
        if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
                DRV_LOG(ERR, "Failed to query EQN for CQ.");
                goto error;
        }
        cq_attr.eqn = eqn;
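        /* Allocate the CQE ring, page aligned, on the queue's socket. */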
        buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
                                rxq_ctrl->socket);
        if (!buf) {
                DRV_LOG(ERR, "Failed to allocate memory for CQ.");
                goto error;
        }
        rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
        rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
                                                     cq_size,
                                                     IBV_ACCESS_LOCAL_WRITE);
        if (!rxq_ctrl->cq_umem) {
                DRV_LOG(ERR, "Failed to register umem for CQ.");
                goto error;
        }
        cq_attr.uar_page_id =
                        mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
        cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
        cq_attr.q_umem_valid = 1;
        cq_attr.log_cq_size = log_cqe_n;
        cq_attr.log_page_size = rte_log2_u32(page_size);
        cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
        cq_attr.db_umem_id = rxq_ctrl->cq_dbr_umem_id;
        cq_attr.db_umem_valid = 1;
        cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
        if (!cq_obj)
                goto error;
        rxq_data->cqe_n = log_cqe_n;
        rxq_data->cqn = cq_obj->id;
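        /* Subscribe the CQ to the event channel if Rx interrupts are used. */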
        if (rxq_obj->devx_channel) {
                ret = mlx5_glue->devx_subscribe_devx_event
                                                (rxq_obj->devx_channel,
                                                 cq_obj->obj,
                                                 sizeof(event_nums),
                                                 event_nums,
                                                 (uint64_t)(uintptr_t)cq_obj);
                if (ret) {
                        DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
                        rte_errno = errno;
                        goto error;
                }
        }
        /* Initialise CQ to 1's to mark HW ownership for all CQEs. */
        memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
        return cq_obj;
error:
        if (cq_obj)
                mlx5_devx_cmd_destroy(cq_obj);
        rxq_release_devx_cq_resources(rxq_ctrl);
        return NULL;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The hairpin DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_rxq_obj *
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_devx_create_rq_attr attr = { 0 };
        struct mlx5_rxq_obj *tmpl = NULL;
        uint32_t max_wq_data;

        MLX5_ASSERT(rxq_data);
        MLX5_ASSERT(!rxq_ctrl->obj);
        tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
                           rxq_ctrl->socket);
        if (!tmpl) {
                DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
                        dev->data->port_id, rxq_data->idx);
                rte_errno = ENOMEM;
                return NULL;
        }
        tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
        tmpl->rxq_ctrl = rxq_ctrl;
        attr.hairpin = 1;
        max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
        /* Jumbo frames > 9KB should be supported, and more packets. */
        if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
                if (priv->config.log_hp_size > max_wq_data) {
                        DRV_LOG(ERR, "Total data size %u power of 2 is "
                                "too large for hairpin.",
                                priv->config.log_hp_size);
                        mlx5_free(tmpl);
                        rte_errno = ERANGE;
                        return NULL;
                }
                attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
        } else {
                attr.wq_attr.log_hairpin_data_sz =
                                (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
                                 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
        }
        /* Set the packets number to the maximum value for performance. */
        attr.wq_attr.log_hairpin_num_packets =
                        attr.wq_attr.log_hairpin_data_sz -
                        MLX5_HAIRPIN_QUEUE_STRIDE;
        tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
                                           rxq_ctrl->socket);
        if (!tmpl->rq) {
                DRV_LOG(ERR,
                        "Port %u Rx hairpin queue %u can't create rq object.",
                        dev->data->port_id, idx);
                mlx5_free(tmpl);
                rte_errno = errno;
                return NULL;
        }
        DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
                idx, (void *)&tmpl);
        LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
        dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
        return tmpl;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_rxq_obj *
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        unsigned int cqe_n;
        unsigned int wqe_n = 1 << rxq_data->elts_n;
        struct mlx5_rxq_obj *tmpl = NULL;
        struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
        struct mlx5_devx_dbr_page *cq_dbr_page = NULL;
        struct mlx5_devx_dbr_page *rq_dbr_page = NULL;
        int64_t dbr_offset;
        int ret = 0;

        MLX5_ASSERT(rxq_data);
        MLX5_ASSERT(!rxq_ctrl->obj);
        if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
                return mlx5_rxq_obj_hairpin_new(dev, idx);
        tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
                           rxq_ctrl->socket);
        if (!tmpl) {
                DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
                        dev->data->port_id, rxq_data->idx);
                rte_errno = ENOMEM;
                goto error;
        }
        tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
        tmpl->rxq_ctrl = rxq_ctrl;
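        /* An event channel is needed only when Rx interrupts are enabled. */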
        if (rxq_ctrl->irq) {
                int devx_ev_flag =
                          MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

                tmpl->devx_channel = mlx5_glue->devx_create_event_channel
                                                                (priv->sh->ctx,
                                                                 devx_ev_flag);
                if (!tmpl->devx_channel) {
                        rte_errno = errno;
                        DRV_LOG(ERR, "Failed to create event channel %d.",
                                rte_errno);
                        goto error;
                }
                tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
        }
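        /* Each MPRQ stride may produce a CQE, so size the CQ accordingly. */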
        if (mlx5_rxq_mprq_enabled(rxq_data))
                cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
        else
                cqe_n = wqe_n - 1;
        DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
                dev->data->port_id, priv->sh->device_attr.max_qp_wr);
        DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
                dev->data->port_id, priv->sh->device_attr.max_sge);
        /* Allocate CQ door-bell. */
        dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &cq_dbr_page);
        if (dbr_offset < 0) {
                DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
                goto error;
        }
        rxq_ctrl->cq_dbr_offset = dbr_offset;
        rxq_ctrl->cq_dbr_umem_id = mlx5_os_get_umem_id(cq_dbr_page->umem);
        rxq_data->cq_db = (uint32_t *)((uintptr_t)cq_dbr_page->dbrs +
                                       (uintptr_t)rxq_ctrl->cq_dbr_offset);
        rxq_data->cq_uar =
                        mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
        /* Create CQ using DevX API. */
        tmpl->devx_cq = mlx5_devx_cq_new(dev, cqe_n, idx, tmpl);
        if (!tmpl->devx_cq) {
                DRV_LOG(ERR, "Failed to create CQ.");
                goto error;
        }
        /* Allocate RQ door-bell. */
        dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &rq_dbr_page);
        if (dbr_offset < 0) {
                DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
                goto error;
        }
        rxq_ctrl->rq_dbr_offset = dbr_offset;
        rxq_ctrl->rq_dbr_umem_id = mlx5_os_get_umem_id(rq_dbr_page->umem);
        rxq_data->rq_db = (uint32_t *)((uintptr_t)rq_dbr_page->dbrs +
                                       (uintptr_t)rxq_ctrl->rq_dbr_offset);
        /* Create RQ using DevX API. */
        tmpl->rq = mlx5_devx_rq_new(dev, idx, tmpl->devx_cq->id);
        if (!tmpl->rq) {
                DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
                        dev->data->port_id, idx);
                rte_errno = ENOMEM;
                goto error;
        }
        /* Change queue state to ready. */
        rq_attr.rq_state = MLX5_RQC_STATE_RST;
        rq_attr.state = MLX5_RQC_STATE_RDY;
        ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
        if (ret)
                goto error;
        rxq_data->cq_arm_sn = 0;
        mlx5_rxq_initialize(rxq_data);
        rxq_data->cq_ci = 0;
        DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
                idx, (void *)&tmpl);
        LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
        dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        rxq_ctrl->wqn = tmpl->rq->id;
        return tmpl;
error:
        if (tmpl) {
                ret = rte_errno; /* Save rte_errno before cleanup. */
                if (tmpl->rq)
                        claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
                if (tmpl->devx_cq)
                        claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
                if (tmpl->devx_channel)
                        mlx5_glue->devx_destroy_event_channel
                                                        (tmpl->devx_channel);
                mlx5_free(tmpl);
                rte_errno = ret; /* Restore rte_errno. */
        }
        if (rq_dbr_page)
                claim_zero(mlx5_release_dbr(&priv->dbrpgs,
                                            rxq_ctrl->rq_dbr_umem_id,
                                            rxq_ctrl->rq_dbr_offset));
        if (cq_dbr_page)
                claim_zero(mlx5_release_dbr(&priv->dbrpgs,
                                            rxq_ctrl->cq_dbr_umem_id,
                                            rxq_ctrl->cq_dbr_offset));
        rxq_release_devx_rq_resources(rxq_ctrl);
        rxq_release_devx_cq_resources(rxq_ctrl);
        return NULL;
}

struct mlx5_obj_ops devx_obj_ops = {
        .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
        .rxq_obj_new = mlx5_rxq_devx_obj_new,
        .rxq_event_get = mlx5_rx_devx_get_event,
        .rxq_obj_release = mlx5_rxq_devx_obj_release,
};