/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <sys/queue.h>

#include "mlx5_autoconf.h"

#include <rte_malloc.h>
#include <rte_ethdev_driver.h>
#include <rte_common.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_mr.h>
#include <mlx5_rxtx.h>
#include <mlx5_verbs.h>
#include <mlx5_utils.h>
#include <mlx5_malloc.h>
/**
 * Register mr. Given protection domain pointer, pointer to addr and length
 * register the memory region.
 *
 * @param[in] pd
 *   Pointer to protection domain context.
 * @param[in] addr
 *   Pointer to memory start address.
 * @param[in] length
 *   Length of the memory to register.
 * @param[out] pmd_mr
 *   pmd_mr struct set with lkey, address, length and pointer to mr object.
 *
 * @return
 *   0 on successful registration, -1 otherwise.
 */
static int
mlx5_reg_mr(void *pd, void *addr, size_t length,
	    struct mlx5_pmd_mr *pmd_mr)
{
	return mlx5_common_verbs_reg_mr(pd, addr, length, pmd_mr);
}
/**
 * Deregister mr. Given the mlx5 pmd MR - deregister the MR.
 *
 * @param[in] pmd_mr
 *   pmd_mr struct set with lkey, address, length and pointer to mr object.
 */
static void
mlx5_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
{
	mlx5_common_verbs_dereg_mr(pmd_mr);
}
/* verbs operations. */
const struct mlx5_verbs_ops mlx5_verbs_ops = {
	.reg_mr = mlx5_reg_mr,
	.dereg_mr = mlx5_dereg_mr,
};
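
/*
 * Usage sketch (illustrative only, not called by the driver): registering a
 * buffer through the ops table above and releasing it again. The pd pointer
 * and buffer are assumed to be supplied by the caller.
 */
static __rte_unused int
mlx5_verbs_ops_usage_sketch(void *pd, void *buf, size_t len)
{
	struct mlx5_pmd_mr mr;

	if (mlx5_verbs_ops.reg_mr(pd, buf, len, &mr) < 0)
		return -1; /* Registration failed. */
	/* mr.lkey is now usable in work requests posted to this PD. */
	mlx5_verbs_ops.dereg_mr(&mr);
	return 0;
}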
/**
 * Modify Rx WQ vlan stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable removal of VLAN header.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_wq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	uint16_t vlan_offloads =
		(on ? IBV_WQ_FLAGS_CVLAN_STRIPPING : 0) |
		0;
	struct ibv_wq_attr mod;

	mod = (struct ibv_wq_attr){
		.attr_mask = IBV_WQ_ATTR_FLAGS,
		.flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING,
		.flags = vlan_offloads,
	};
	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
}
/**
 * Modifies the attributes for the specified WQ.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 * @param is_start
 *   True - start the queue, false - stop the queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_modify_wq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
{
	struct ibv_wq_attr mod = {
		.attr_mask = IBV_WQ_ATTR_STATE,
		.wq_state = is_start ? IBV_WQS_RDY : IBV_WQS_RESET,
	};

	return mlx5_glue->modify_wq(rxq_obj->wq, &mod);
}
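
/*
 * Illustrative sketch (not called by the driver): stopping and restarting
 * an Rx WQ through the helper above. A real caller checks the return codes
 * and sets rte_errno on failure.
 */
static __rte_unused void
mlx5_ibv_modify_wq_sketch(struct mlx5_rxq_obj *rxq_obj)
{
	(void)mlx5_ibv_modify_wq(rxq_obj, false); /* RDY -> RESET */
	(void)mlx5_ibv_modify_wq(rxq_obj, true);  /* RESET -> RDY */
}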
/**
 * Create a CQ Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct ibv_cq *
mlx5_rxq_ibv_cq_create(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct {
		struct ibv_cq_init_attr_ex ibv;
		struct mlx5dv_cq_init_attr mlx5;
	} cq_attr;

	cq_attr.ibv = (struct ibv_cq_init_attr_ex){
		.cqe = cqe_n,
		.channel = rxq_obj->ibv_channel,
		.comp_mask = 0,
	};
	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr){
		.comp_mask = 0,
	};
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp) {
		cq_attr.mlx5.comp_mask |=
				MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		cq_attr.mlx5.cqe_comp_res_format =
				mlx5_rxq_mprq_enabled(rxq_data) ?
					MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
					MLX5DV_CQE_RES_FORMAT_HASH;
#else
		cq_attr.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * make cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cq_attr.ibv.cqe *= 2;
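		/*
		 * Worked instance (assuming cqe_n = 512): scalar Rx with
		 * compression enabled gets a 1024-entry CQ, while vectorized
		 * Rx keeps 512 entries so cq_ci stays aligned with rq_ci.
		 */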
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW"
			" timestamp.",
			dev->data->port_id);
	}
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
	if (priv->config.cqe_pad) {
		cq_attr.mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_FLAGS;
		cq_attr.mlx5.flags |= MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
	}
#endif
	return mlx5_glue->cq_ex_to_cq(mlx5_glue->dv_create_cq(priv->sh->ctx,
							      &cq_attr.ibv,
							      &cq_attr.mlx5));
}
/**
 * Create a WQ Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The Verbs WQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct ibv_wq *
mlx5_rxq_ibv_wq_create(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *rxq_obj = rxq_ctrl->obj;
	unsigned int wqe_n = 1 << rxq_data->elts_n;
	struct {
		struct ibv_wq_init_attr ibv;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
		struct mlx5dv_wq_init_attr mlx5;
#endif
	} wq_attr;
	wq_attr.ibv = (struct ibv_wq_init_attr){
		.wq_context = NULL, /* Could be useful in the future. */
		.wq_type = IBV_WQT_RQ,
		/* Max number of outstanding WRs. */
		.max_wr = wqe_n >> rxq_data->sges_n,
		/* Max number of scatter/gather elements in a WR. */
		.max_sge = 1 << rxq_data->sges_n,
		.pd = priv->sh->pd,
		.cq = rxq_obj->ibv_cq,
		.comp_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING | 0,
		.create_flags = (rxq_data->vlan_strip ?
				 IBV_WQ_FLAGS_CVLAN_STRIPPING : 0),
	};
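	/*
	 * Sizing example: with 512 descriptors (elts_n = 9) and sges_n = 2,
	 * the WQ is requested with 512 >> 2 = 128 WRs of 1 << 2 = 4 SGEs
	 * each, matching the "desc" buffer budget exactly.
	 */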
	/* By default, FCS (CRC) is stripped by hardware. */
	if (rxq_data->crc_present) {
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_SCATTER_FCS;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
	}
	if (priv->config.hw_padding) {
#if defined(HAVE_IBV_WQ_FLAG_RX_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAG_RX_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#elif defined(HAVE_IBV_WQ_FLAGS_PCI_WRITE_END_PADDING)
		wq_attr.ibv.create_flags |= IBV_WQ_FLAGS_PCI_WRITE_END_PADDING;
		wq_attr.ibv.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS;
#endif
	}
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
	wq_attr.mlx5 = (struct mlx5dv_wq_init_attr){
		.comp_mask = 0,
	};
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		struct mlx5dv_striding_rq_init_attr *mprq_attr =
			&wq_attr.mlx5.striding_rq_attrs;

		wq_attr.mlx5.comp_mask |= MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
		*mprq_attr = (struct mlx5dv_striding_rq_init_attr){
			.single_stride_log_num_of_bytes = rxq_data->strd_sz_n,
			.single_wqe_log_num_of_strides = rxq_data->strd_num_n,
			.two_byte_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT,
		};
	}
	rxq_obj->wq = mlx5_glue->dv_create_wq(priv->sh->ctx, &wq_attr.ibv,
					      &wq_attr.mlx5);
#else
	rxq_obj->wq = mlx5_glue->create_wq(priv->sh->ctx, &wq_attr.ibv);
#endif
	if (rxq_obj->wq) {
		/*
		 * Make sure number of WRs*SGEs match expectations since a
		 * queue cannot allocate more than "desc" buffers.
		 */
		if (wq_attr.ibv.max_wr != (wqe_n >> rxq_data->sges_n) ||
		    wq_attr.ibv.max_sge != (1u << rxq_data->sges_n)) {
			DRV_LOG(ERR,
				"Port %u Rx queue %u requested %u*%u but got"
				" %u*%u WRs*SGEs.",
				dev->data->port_id, idx,
				wqe_n >> rxq_data->sges_n,
				(1 << rxq_data->sges_n),
				wq_attr.ibv.max_wr, wq_attr.ibv.max_sge);
			claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
			rxq_obj->wq = NULL;
			rte_errno = EINVAL;
		}
	}
	return rxq_obj->wq;
}
/**
 * Create the Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ibv_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	struct mlx5dv_cq cq_info;
	struct mlx5dv_rwq rwq;
	int ret = 0;
	struct mlx5dv_obj obj;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
	priv->verbs_alloc_ctx.obj = rxq_ctrl;
	tmpl->type = MLX5_RXQ_OBJ_TYPE_IBV;
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		tmpl->ibv_channel =
				mlx5_glue->create_comp_channel(priv->sh->ctx);
		if (!tmpl->ibv_channel) {
			DRV_LOG(ERR, "Port %u: comp channel creation failure.",
				dev->data->port_id);
			rte_errno = ENOMEM;
			goto error;
		}
		tmpl->fd = ((struct ibv_comp_channel *)(tmpl->ibv_channel))->fd;
	}
	/* Create CQ using Verbs API. */
	tmpl->ibv_cq = mlx5_rxq_ibv_cq_create(dev, idx);
	if (!tmpl->ibv_cq) {
		DRV_LOG(ERR, "Port %u Rx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	obj.cq.in = tmpl->ibv_cq;
	obj.cq.out = &cq_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ);
	if (ret) {
		rte_errno = ret;
		goto error;
	}
	if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) {
		DRV_LOG(ERR,
			"Port %u wrong MLX5_CQE_SIZE environment "
			"variable value: it should be set to %u.",
			dev->data->port_id, RTE_CACHE_LINE_SIZE);
		rte_errno = EINVAL;
		goto error;
	}
	/* Fill the rings. */
	rxq_data->cqe_n = log2above(cq_info.cqe_cnt);
	rxq_data->cq_db = cq_info.dbrec;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
	rxq_data->cq_uar = cq_info.cq_uar;
	rxq_data->cqn = cq_info.cqn;
	/* Create WQ (RQ) using Verbs API. */
	tmpl->wq = mlx5_rxq_ibv_wq_create(dev, idx);
	if (!tmpl->wq) {
		DRV_LOG(ERR, "Port %u Rx queue %u WQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_ibv_modify_wq(tmpl, true);
	if (ret) {
		DRV_LOG(ERR,
			"Port %u Rx queue %u WQ state to IBV_WQS_RDY failed.",
			dev->data->port_id, idx);
		rte_errno = ret;
		goto error;
	}
	obj.rwq.in = tmpl->wq;
	obj.rwq.out = &rwq;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_RWQ);
	if (ret) {
		rte_errno = ret;
		goto error;
	}
	rxq_data->wqes = rwq.buf;
	rxq_data->rq_db = rwq.dbrec;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = ((struct ibv_wq *)(tmpl->wq))->wq_num;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->wq)
		claim_zero(mlx5_glue->destroy_wq(tmpl->wq));
	if (tmpl->ibv_cq)
		claim_zero(mlx5_glue->destroy_cq(tmpl->ibv_cq));
	if (tmpl->ibv_channel)
		claim_zero(mlx5_glue->destroy_comp_channel(tmpl->ibv_channel));
	rte_errno = ret; /* Restore rte_errno. */
	priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE;
	return -rte_errno;
}
/**
 * Release an Rx verbs queue object.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 */
static void
mlx5_rxq_ibv_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->wq);
	MLX5_ASSERT(rxq_obj->ibv_cq);
	claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
	claim_zero(mlx5_glue->destroy_cq(rxq_obj->ibv_cq));
	if (rxq_obj->ibv_channel)
		claim_zero(mlx5_glue->destroy_comp_channel
						(rxq_obj->ibv_channel));
}
/**
 * Get event for an Rx verbs queue object.
 *
 * @param rxq_obj
 *   Verbs Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_ibv_get_event(struct mlx5_rxq_obj *rxq_obj)
{
	struct ibv_cq *ev_cq;
	void *ev_ctx;
	int ret = mlx5_glue->get_cq_event(rxq_obj->ibv_channel,
					  &ev_cq, &ev_ctx);

	if (ret < 0 || ev_cq != rxq_obj->ibv_cq)
		goto exit;
	mlx5_glue->ack_cq_events(rxq_obj->ibv_cq, 1);
	return 0;
exit:
	if (ret < 0)
		rte_errno = errno;
	else
		rte_errno = EINVAL;
	return -rte_errno;
}
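
/*
 * Note: each event delivered by get_cq_event() above must be acknowledged
 * via ack_cq_events() before the CQ is destroyed; the datapath is then
 * expected to re-arm the CQ to receive further events.
 */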
/**
 * Creates a receive work queue as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   Verbs indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
		       struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_wq *wq[1 << log_n];
	unsigned int i, j;

	MLX5_ASSERT(ind_tbl);
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[ind_tbl->queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		wq[i] = rxq_ctrl->obj->wq;
	}
	MLX5_ASSERT(i > 0);
	/* Finalise indirection table. */
	for (j = 0; i != (unsigned int)(1 << log_n); ++j, ++i)
		wq[i] = wq[j];
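	/*
	 * Example: three queues in a four-entry table (log_n = 2) yield
	 * wq = { q0, q1, q2, q0 }; entries wrap around so the power-of-two
	 * table is always fully populated.
	 */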
	ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table(priv->sh->ctx,
					&(struct ibv_rwq_ind_table_init_attr){
						.log_ind_tbl_size = log_n,
						.ind_tbl = wq,
						.comp_mask = 0,
					});
	if (!ind_tbl->ind_table) {
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}
/**
 * Destroys the specified Indirection Table.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_ibv_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
}
/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		  int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_qp *qp = NULL;
	struct mlx5_ind_table_obj *ind_tbl = hrxq->ind_table;
	const uint8_t *rss_key = hrxq->rss_key;
	uint64_t hash_fields = hrxq->hash_fields;
	int err;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	struct mlx5dv_qp_init_attr qp_init_attr;

	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
	if (tunnel) {
		qp_init_attr.comp_mask =
				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
		qp_init_attr.create_flags = MLX5DV_QP_CREATE_TUNNEL_OFFLOADS;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	if (dev->data->dev_conf.lpbk_mode) {
		/* Allow packet sent from NIC loop back w/o source MAC check. */
		qp_init_attr.comp_mask |=
				MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS;
		qp_init_attr.create_flags |=
				MLX5DV_QP_CREATE_TIR_ALLOW_SELF_LOOPBACK_UC;
	}
#endif
	qp = mlx5_glue->dv_create_qp
		(priv->sh->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask =
				IBV_QP_INIT_ATTR_PD |
				IBV_QP_INIT_ATTR_IND_TABLE |
				IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function =
					IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = hrxq->rss_key_len,
				.rx_hash_key =
					(void *)(uintptr_t)rss_key,
				.rx_hash_fields_mask = hash_fields,
			},
			.rwq_ind_tbl = ind_tbl->ind_table,
			.pd = priv->sh->pd,
		 },
		 &qp_init_attr);
#else
	qp = mlx5_glue->create_qp_ex
		(priv->sh->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask =
				IBV_QP_INIT_ATTR_PD |
				IBV_QP_INIT_ATTR_IND_TABLE |
				IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function =
					IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = hrxq->rss_key_len,
				.rx_hash_key =
					(void *)(uintptr_t)rss_key,
				.rx_hash_fields_mask = hash_fields,
			},
			.rwq_ind_tbl = ind_tbl->ind_table,
			.pd = priv->sh->pd,
		 });
#endif
	if (!qp) {
		rte_errno = errno;
		goto error;
	}
	hrxq->qp = qp;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (qp)
		claim_zero(mlx5_glue->destroy_qp(qp));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Destroy a Verbs queue pair.
 *
 * @param hrxq
 *   Hash Rx queue to release its qp.
 */
static void
mlx5_ibv_qp_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
}
/**
 * Release a drop Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_ibv_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;

	if (rxq->wq)
		claim_zero(mlx5_glue->destroy_wq(rxq->wq));
	if (rxq->ibv_cq)
		claim_zero(mlx5_glue->destroy_cq(rxq->ibv_cq));
	mlx5_free(rxq);
	priv->drop_queue.rxq = NULL;
}
/**
 * Create a drop Rx queue Verbs object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_ibv_obj_drop_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct ibv_context *ctx = priv->sh->ctx;
	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;

	if (rxq)
		return 0;
	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, SOCKET_ID_ANY);
	if (!rxq) {
		DEBUG("Port %u cannot allocate drop Rx queue memory.",
		      dev->data->port_id);
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->drop_queue.rxq = rxq;
	rxq->ibv_cq = mlx5_glue->create_cq(ctx, 1, NULL, NULL, 0);
	if (!rxq->ibv_cq) {
		DEBUG("Port %u cannot allocate CQ for drop queue.",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	rxq->wq = mlx5_glue->create_wq(ctx, &(struct ibv_wq_init_attr){
						.wq_type = IBV_WQT_RQ,
						.max_wr = 1,
						.max_sge = 1,
						.pd = priv->sh->pd,
						.cq = rxq->ibv_cq,
					});
	if (!rxq->wq) {
		DEBUG("Port %u cannot allocate WQ for drop queue.",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	priv->drop_queue.rxq = rxq;
	return 0;
error:
	mlx5_rxq_ibv_obj_drop_release(dev);
	return -rte_errno;
}
/**
 * Create a Verbs drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_ibv_drop_action_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	struct ibv_rwq_ind_table *ind_tbl = NULL;
	struct mlx5_rxq_obj *rxq;
	int ret;

	MLX5_ASSERT(hrxq && hrxq->ind_table);
	ret = mlx5_rxq_ibv_obj_drop_create(dev);
	if (ret < 0)
		goto error;
	rxq = priv->drop_queue.rxq;
	ind_tbl = mlx5_glue->create_rwq_ind_table
				(priv->sh->ctx,
				 &(struct ibv_rwq_ind_table_init_attr){
					.log_ind_tbl_size = 0,
					.ind_tbl = (struct ibv_wq **)&rxq->wq,
					.comp_mask = 0,
				 });
	if (!ind_tbl) {
		DEBUG("Port %u cannot allocate indirection table for drop"
		      " queue.", dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
	hrxq->qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
		 &(struct ibv_qp_init_attr_ex){
			.qp_type = IBV_QPT_RAW_PACKET,
			.comp_mask = IBV_QP_INIT_ATTR_PD |
				     IBV_QP_INIT_ATTR_IND_TABLE |
				     IBV_QP_INIT_ATTR_RX_HASH,
			.rx_hash_conf = (struct ibv_rx_hash_conf){
				.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
				.rx_hash_key_len = MLX5_RSS_HASH_KEY_LEN,
				.rx_hash_key = rss_hash_default_key,
				.rx_hash_fields_mask = 0,
			},
			.rwq_ind_tbl = ind_tbl,
			.pd = priv->sh->pd
		 });
	if (!hrxq->qp) {
		DEBUG("Port %u cannot allocate QP for drop queue.",
		      dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	hrxq->action = mlx5_glue->dv_create_flow_action_dest_ibv_qp(hrxq->qp);
	if (!hrxq->action) {
		rte_errno = errno;
		goto error;
	}
#endif
	hrxq->ind_table->ind_table = ind_tbl;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->qp)
		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
	if (ind_tbl)
		claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
	if (priv->drop_queue.rxq)
		mlx5_rxq_ibv_obj_drop_release(dev);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}
/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_ibv_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	struct ibv_rwq_ind_table *ind_tbl = hrxq->ind_table->ind_table;

#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	claim_zero(mlx5_glue->destroy_flow_action(hrxq->action));
#endif
	claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
	claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl));
	mlx5_rxq_ibv_obj_drop_release(dev);
}
struct mlx5_obj_ops ibv_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_wq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_ibv_obj_new,
	.rxq_event_get = mlx5_rx_ibv_get_event,
	.rxq_obj_modify = mlx5_ibv_modify_wq,
	.rxq_obj_release = mlx5_rxq_ibv_obj_release,
	.ind_table_new = mlx5_ibv_ind_table_new,
	.ind_table_destroy = mlx5_ibv_ind_table_destroy,
	.hrxq_new = mlx5_ibv_hrxq_new,
	.hrxq_destroy = mlx5_ibv_qp_destroy,
	.drop_action_create = mlx5_ibv_drop_action_create,
	.drop_action_destroy = mlx5_ibv_drop_action_destroy,
};
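
/*
 * Note: this table is the Verbs counterpart of the DevX object ops; probe
 * code is expected to plug it in (e.g. as priv->obj_ops) when DevX queue
 * objects are not in use. The actual wiring lives outside this file.
 */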