/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"

/**
 * Modify RQ vlan stripping offload.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	/* VSD stands for "VLAN stripping disable", hence the inversion. */
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}
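
/*
 * Usage sketch (hypothetical caller, not part of this file): the VLAN
 * offload path reaches this function through the ops table registered
 * at the bottom of this file, e.g.:
 *
 *	priv->obj_ops.rxq_obj_modify_vlan_strip(rxq_ctrl->obj, 1);
 *
 * where on == 1 clears VSD and thereby enables stripping on a running
 * queue without recreating it.
 */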

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue control structure.
 */
static void
rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (rxq_ctrl->wq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue control structure.
 */
static void
rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (rxq_ctrl->cq_umem) {
		mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
}
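
/*
 * The CQE ring freed above pairs with the rte_calloc_socket() allocation
 * in mlx5_devx_cq_new(), hence rte_free(); the RQ WQE buffer is
 * allocated with mlx5_malloc() and freed with mlx5_free() accordingly.
 */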

/**
 * Release the resources of a hairpin Rx queue object.
 *
 * @param rxq_obj
 *   Hairpin Rx queue object.
 */
static void
mlx5_rxq_obj_hairpin_release(struct mlx5_rxq_obj *rxq_obj)
{
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };

	/* Move the RQ back to reset state before destroying it. */
	rq_attr.state = MLX5_RQC_STATE_RST;
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
	claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
}
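
/*
 * In the DevX modify-RQ command, rq_state is the state the RQ is
 * expected to be in and state is the target, so the call above moves
 * the hairpin RQ from RDY back to RST before it is destroyed; the
 * startup path below performs the opposite RST -> RDY transition.
 */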

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	struct mlx5_priv *priv = rxq_obj->rxq_ctrl->priv;

	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
	if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
		mlx5_rxq_obj_hairpin_release(rxq_obj);
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		rxq_free_elts(rxq_obj->rxq_ctrl);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		claim_zero(mlx5_release_dbr(&priv->dbrpgs,
					    rxq_obj->rxq_ctrl->rq_dbr_umem_id,
					    rxq_obj->rxq_ctrl->rq_dbr_offset));
		claim_zero(mlx5_release_dbr(&priv->dbrpgs,
					    rxq_obj->rxq_ctrl->cq_dbr_umem_id,
					    rxq_obj->rxq_ctrl->cq_dbr_offset));
		if (rxq_obj->devx_channel)
			mlx5_glue->devx_destroy_event_channel
						       (rxq_obj->devx_channel);
		rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
	}
	LIST_REMOVE(rxq_obj, next);
	mlx5_free(rxq_obj);
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}
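
/*
 * The cookie check above mirrors the subscription done in
 * mlx5_devx_cq_new(), which registers the CQ object pointer as the
 * event cookie; an event carrying any other cookie is rejected with
 * EINVAL.
 */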

/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}
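
/*
 * Example: with VLAN stripping on and the CRC kept (crc_present == 1),
 * the RQ is requested in RST state with vsd == 0 and scatter_fcs == 1,
 * bound to the CQ number given in cqn.
 */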

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.cqe_pad ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id = rxq_ctrl->rq_dbr_umem_id;
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}
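
/*
 * Note that the doorbell is passed as an (umem id, offset) pair rather
 * than a virtual address: dbr_umem_id/dbr_addr point into the
 * doorbell-record page obtained from mlx5_get_dbr() by the caller.
 */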

/**
 * Create a RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param cqn
 *   CQ number to use with this RQ.
 *
 * @return
 *   The DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_devx_rq_new(struct rte_eth_dev *dev, uint16_t idx, uint32_t cqn)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
	wq_size = wqe_n * wqe_size;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf)
		return NULL;
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
						     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem) {
		mlx5_free(buf);
		return NULL;
	}
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		rxq_release_devx_rq_resources(rxq_ctrl);
	return rq;
}
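
/*
 * Worked example for the non-MPRQ branch (an illustration, assuming
 * elts_n == 10, sges_n == 0 and a 16-byte struct mlx5_wqe_data_seg):
 * wqe_n == 1024, log_wqe_size == 4, so wq_size == 1024 * 16 == 16 KiB
 * of zeroed, umem-registered ring memory.
 */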

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param cqe_n
 *   Number of CQEs in CQ.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 * @param rxq_obj
 *   Pointer to Rx queue object data.
 *
 * @return
 *   The DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_devx_cq_new(struct rte_eth_dev *dev, unsigned int cqe_n, uint16_t idx,
		 struct mlx5_rxq_obj *rxq_obj)
{
	struct mlx5_devx_obj *cq_obj = NULL;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
	uint32_t eqn = 0;
	void *buf = NULL;
	uint16_t event_nums[1] = {0};
	uint32_t log_cqe_n;
	uint32_t cq_size;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		cq_attr.mini_cqe_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
					MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
					MLX5DV_CQE_RES_FORMAT_HASH;
#else
		cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (priv->config.cqe_pad)
		cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
#endif
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	/* Query the EQN for this core. */
	if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
		DRV_LOG(ERR, "Failed to query EQN for CQ.");
		goto error;
	}
	cq_attr.eqn = eqn;
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
						     cq_size,
						     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = rxq_ctrl->cq_dbr_umem_id;
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_obj->devx_channel) {
		ret = mlx5_glue->devx_subscribe_devx_event
						(rxq_obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}
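
/*
 * Two details above are easy to miss: cqe_n is doubled when CQE
 * compression is enabled but vectorized Rx is not supported, leaving
 * room for the decompressed CQEs, and the closing 0xFF memset sets the
 * ownership bit of every CQE so hardware owns the whole ring until the
 * SW consumer index makes its first sweep.
 */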

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The hairpin DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_rxq_obj *
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = NULL;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(!rxq_ctrl->obj);
	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			   rxq_ctrl->socket);
	if (!tmpl) {
		DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
			dev->data->port_id, rxq_data->idx);
		rte_errno = ENOMEM;
		return NULL;
	}
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u (power of 2) is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			mlx5_free(tmpl);
			rte_errno = ERANGE;
			return NULL;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create rq object.",
			dev->data->port_id, idx);
		mlx5_free(tmpl);
		rte_errno = errno;
		return NULL;
	}
	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
		idx, (void *)tmpl);
	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return tmpl;
}
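
/*
 * Sizing sketch: when log_hp_size is not configured, the data buffer
 * log size is min(log_max_hairpin_wq_data_sz,
 * MLX5_HAIRPIN_JUMBO_LOG_SIZE), large enough for > 9 KiB jumbo frames,
 * and log_hairpin_num_packets follows as
 * log_hairpin_data_sz - MLX5_HAIRPIN_QUEUE_STRIDE.
 */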

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_rxq_obj *
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	unsigned int cqe_n;
	unsigned int wqe_n = 1 << rxq_data->elts_n;
	struct mlx5_rxq_obj *tmpl = NULL;
	struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
	struct mlx5_devx_dbr_page *cq_dbr_page = NULL;
	struct mlx5_devx_dbr_page *rq_dbr_page = NULL;
	int64_t dbr_offset;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(!rxq_ctrl->obj);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl), 0,
			   rxq_ctrl->socket);
	if (!tmpl) {
		DRV_LOG(ERR, "port %u Rx queue %u cannot allocate resources",
			dev->data->port_id, rxq_data->idx);
		rte_errno = ENOMEM;
		goto error;
	}
	tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_glue->devx_create_event_channel
							       (priv->sh->ctx,
								devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	if (mlx5_rxq_mprq_enabled(rxq_data))
		cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
	else
		cqe_n = wqe_n - 1;
	DRV_LOG(DEBUG, "port %u device_attr.max_qp_wr is %d",
		dev->data->port_id, priv->sh->device_attr.max_qp_wr);
	DRV_LOG(DEBUG, "port %u device_attr.max_sge is %d",
		dev->data->port_id, priv->sh->device_attr.max_sge);
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &cq_dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbr_umem_id = mlx5_os_get_umem_id(cq_dbr_page->umem);
	rxq_data->cq_db = (uint32_t *)((uintptr_t)cq_dbr_page->dbrs +
				       (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
	/* Create CQ using DevX API. */
	tmpl->devx_cq = mlx5_devx_cq_new(dev, cqe_n, idx, tmpl);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &rq_dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
		goto error;
	}
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbr_umem_id = mlx5_os_get_umem_id(rq_dbr_page->umem);
	rxq_data->rq_db = (uint32_t *)((uintptr_t)rq_dbr_page->dbrs +
				       (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	tmpl->rq = mlx5_devx_rq_new(dev, idx, tmpl->devx_cq->id);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	rq_attr.rq_state = MLX5_RQC_STATE_RST;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	ret = mlx5_devx_cmd_modify_rq(tmpl->rq, &rq_attr);
	if (ret)
		goto error;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id,
		idx, (void *)tmpl);
	LIST_INSERT_HEAD(&priv->rxqsobj, tmpl, next);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	return tmpl;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl) {
		if (tmpl->rq)
			claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
		if (tmpl->devx_cq)
			claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
		if (tmpl->devx_channel)
			mlx5_glue->devx_destroy_event_channel
							(tmpl->devx_channel);
		mlx5_free(tmpl);
	}
	if (rq_dbr_page)
		claim_zero(mlx5_release_dbr(&priv->dbrpgs,
					    rxq_ctrl->rq_dbr_umem_id,
					    rxq_ctrl->rq_dbr_offset));
	if (cq_dbr_page)
		claim_zero(mlx5_release_dbr(&priv->dbrpgs,
					    rxq_ctrl->cq_dbr_umem_id,
					    rxq_ctrl->cq_dbr_offset));
	rxq_release_devx_rq_resources(rxq_ctrl);
	rxq_release_devx_cq_resources(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return NULL;
}

struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
};
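
/*
 * Wiring sketch (hypothetical, mirroring how such ops tables are
 * consumed elsewhere in the PMD): a port probed with DevX support
 * would select this table once at probe time, e.g.:
 *
 *	if (config->devx)
 *		priv->obj_ops = devx_obj_ops;
 *
 * after which generic Rx queue setup/teardown code calls only through
 * priv->obj_ops and stays agnostic of DevX vs. Verbs objects.
 */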