/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <stdbool.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_tx.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"

/**
 * Modify RQ vlan stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}

/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param type
 *   Type of queue state change.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	switch (type) {
	case MLX5_RXQ_MOD_ERR2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_ERR;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RST2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		break;
	case MLX5_RXQ_MOD_RDY2ERR:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_ERR;
		break;
	case MLX5_RXQ_MOD_RDY2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	default:
		break;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}

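/*
 * Usage sketch (illustrative only, not a fixed driver sequence): an RQ
 * that entered the error state is recovered by cycling it through reset
 * and back to ready:
 *
 *   mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_ERR2RST);
 *   mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RST2RDY);
 *
 * Each call fills both the current state (rq_state) and the target state
 * (state) so that firmware can validate the transition.
 */
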
/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   Unnecessary for DevX, kept for signature compatibility.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_modify(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		     uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;

	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port argument is relevant only for the Verbs API. Both this
	 * function and its Verbs counterpart are reached through the same
	 * function pointer, so they must share the same parameter list.
	 */
	(void)dev_port;
	return 0;
}

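/*
 * Illustration of the branch structure above: MLX5_TXQ_MOD_ERR2RDY takes
 * both steps (ERR -> RST, then RST -> RDY), while MLX5_TXQ_MOD_RST2RDY
 * skips the first branch and MLX5_TXQ_MOD_RDY2RST skips the second.
 */
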
/**
 * Destroy the Rx queue DevX object.
 *
 * @param rxq_obj
 *   Rxq object to destroy.
 */
static void
mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj)
{
	mlx5_devx_rq_destroy(&rxq_obj->rq_obj);
	memset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj));
	mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
	memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
		MLX5_ASSERT(rxq_obj->rq);
		mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->cq_obj.cq);
		MLX5_ASSERT(rxq_obj->rq_obj.rq);
		mlx5_rxq_release_devx_resources(rxq_obj);
		if (rxq_obj->devx_channel)
			mlx5_os_devx_destroy_event_channel
							(rxq_obj->devx_channel);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param rxq_data
 *   RX queue data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev,
				  struct mlx5_rxq_data *rxq_data)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_common_device *cdev = priv->sh->cdev;
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
	uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
	uint32_t wqe_size, log_wqe_size;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
	rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
	rq_attr.ts_format =
		mlx5_ts_format_conv(cdev->config.hca_attr.rq_ts_format);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = log_desc_n;
	rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
					   MLX5_WQ_END_PAD_MODE_ALIGN :
					   MLX5_WQ_END_PAD_MODE_NONE;
	rq_attr.wq_attr.pd = cdev->pdn;
	rq_attr.counter_set_id = priv->counter_set_id;
	/* Create RQ using DevX API. */
	return mlx5_devx_rq_create(cdev->ctx, &rxq_ctrl->obj->rq_obj, wqe_size,
				   log_desc_n, &rq_attr, rxq_ctrl->socket);
}

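/*
 * Worked sizing example for the function above (hypothetical values):
 * with elts_n = 10 (1024 elements) and sges_n = 2 (4 segments per
 * packet), log_desc_n = 8, i.e. a 256-WQE ring. In the non-MPRQ case
 * wqe_size starts at sizeof(struct mlx5_wqe_data_seg) = 16B, so
 * log_wqe_size = 4 + 2 = 6 and every WQE is 64B wide, holding one data
 * segment per SGE.
 */
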
/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param rxq_data
 *   RX queue data.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev,
				  struct mlx5_rxq_data *rxq_data)
{
	struct mlx5_devx_cq *cq_obj = 0;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	uint32_t log_cqe_n;
	uint16_t event_nums[1] = { 0 };
	int ret = 0;

	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
		rxq_data->byte_mask = UINT32_MAX;
		switch (priv->config.cqe_comp_fmt) {
		case MLX5_CQE_RESP_FORMAT_HASH:
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM:
			/*
			 * Select CSUM miniCQE format only for non-vectorized
			 * MPRQ Rx burst, use HASH miniCQE format for others.
			 */
			if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
			    mlx5_rxq_mprq_enabled(rxq_data))
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
			else
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_HASH;
			rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
			break;
		case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
			rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
			cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
			break;
		case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
			cq_attr.mini_cqe_res_format = 0;
			cq_attr.mini_cqe_res_format_ext = 1;
			break;
		}
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is enabled, format %d.",
			dev->data->port_id, priv->config.cqe_comp_fmt);
		/*
		 * For vectorized Rx, it must not be doubled in order to
		 * keep cq_ci and rq_ci aligned.
		 */
		if (mlx5_rxq_check_vec_support(rxq_data) < 0)
			cqe_n *= 2;
	} else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for HW timestamp.",
			dev->data->port_id);
	} else if (priv->config.cqe_comp && rxq_data->lro) {
		DRV_LOG(DEBUG,
			"Port %u Rx CQE compression is disabled for LRO.",
			dev->data->port_id);
	}
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
	log_cqe_n = log2above(cqe_n);
	/* Create CQ using DevX API. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &rxq_ctrl->obj->cq_obj,
				  log_cqe_n, &cq_attr, sh->numa_node);
	if (ret)
		return ret;
	cq_obj = &rxq_ctrl->obj->cq_obj;
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
							(uintptr_t)cq_obj->cqes;
	rxq_data->cq_db = cq_obj->db_rec;
	rxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->cq->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_os_devx_subscribe_devx_event
					      (rxq_ctrl->obj->devx_channel,
					       cq_obj->cq->obj,
					       sizeof(event_nums),
					       event_nums,
					       (uint64_t)(uintptr_t)cq_obj->cq);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			ret = errno;
			mlx5_devx_cq_destroy(cq_obj);
			memset(cq_obj, 0, sizeof(*cq_obj));
			rte_errno = ret;
			return -ret;
		}
	}
	return 0;
}

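/*
 * Sizing note (interpretive): mlx5_rxq_cqe_num() yields roughly one CQE
 * per receive descriptor. When compression is enabled on the
 * non-vectorized path, the CQ is doubled so that expanded miniCQE
 * sessions cannot overrun the ring, whereas the vectorized path relies
 * on cq_ci and rq_ci advancing in lock-step and therefore keeps the
 * 1:1 sizing.
 */
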
/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.counter_set_id = priv->counter_set_id;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->cdev->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u cannot create RQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

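/*
 * Hairpin sizing example (assumed log values): if log_hairpin_data_sz
 * resolves to MLX5_HAIRPIN_JUMBO_LOG_SIZE, large enough for jumbo frames
 * above 9KB, the packet count is derived by subtracting the per-packet
 * stride log, so the buffer is split into the maximum number of
 * stride-sized packet slots.
 */
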
/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			  MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_os_devx_create_event_channel
							(priv->sh->cdev->ctx,
							 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
	if (ret) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
	if (ret) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
	if (ret)
		goto error;
	rxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.wq.umem_buf;
	rxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.wq.db_rec;
	rxq_data->cq_arm_sn = 0;
	rxq_data->cq_ci = 0;
	mlx5_rxq_initialize(rxq_data);
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq_obj.rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_rxq_devx_obj_release(tmpl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

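/*
 * Note on ordering in the function above: the CQ must exist before the
 * RQ because the RQ attributes reference cq->id, and the RQ is moved
 * from RST to RDY only once both objects exist. On failure, the error
 * path releases whatever was already created.
 */
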
/**
 * Prepare RQT attribute structure for DevX RQT API.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of RX queue indices or NULL, in which case
 *   the attribute will be filled by drop queue ID.
 * @param queues_n
 *   Size of @p queues array or 0 if it is NULL.
 *
 * @return
 *   The RQT attr object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_rqt_attr *
mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
				    const unsigned int log_n,
				    const uint16_t *queues,
				    const uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i = 0, j;

	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			       rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	if (queues == NULL) {
		for (i = 0; i < rqt_n; i++)
			rqt_attr->rq_list[i] = priv->drop_queue.rxq->rq->id;
		return rqt_attr;
	}
	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
				container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;
	}
	MLX5_ASSERT(i > 0);
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	return rqt_attr;
}

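/*
 * Fill-out example (illustrative ids): with log_n = 2 (rqt_n = 4) and
 * queues_n = 3 mapping to RQ ids {7, 8, 9}, the loops above produce
 * rq_list = {7, 8, 9, 7}; entries past queues_n wrap around to the
 * start of the list so the RQT is always fully populated.
 */
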
/**
 * Create RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const uint16_t *queues = dev->data->dev_started ? ind_tbl->queues :
							  NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n, queues,
						       ind_tbl->queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->cdev->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Modify RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of RX queue indices.
 * @param queues_n
 *   Size of @p queues array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
			   const uint16_t *queues, const uint32_t queues_n,
			   struct mlx5_ind_table_obj *ind_tbl)
{
	int ret = 0;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
						       queues, queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
	mlx5_free(rqt_attr);
	if (ret)
		DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
			dev->data->port_id);
	return ret;
}

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR. If table queues array is NULL,
 *   a TIR for drop queue is assumed.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl,
		       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	enum mlx5_rxq_type rxq_obj_type;
	bool lro = true;
	uint32_t i;

	/* NULL queues designate drop queue. */
	if (ind_tbl->queues != NULL) {
		struct mlx5_rxq_data *rxq_data =
					(*priv->rxqs)[ind_tbl->queues[0]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
		rxq_obj_type = rxq_ctrl->type;

		/* Enable TIR LRO only if all the queues were configured for it. */
		for (i = 0; i < ind_tbl->queues_n; ++i) {
			if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
				lro = false;
				break;
			}
		}
	} else {
		rxq_obj_type = priv->drop_queue.rxq->rxq_ctrl->type;
	}
	memset(tir_attr, 0, sizeof(*tir_attr));
	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr->tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select =
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
			hash_fields & IBV_RX_HASH_INNER ?
				&tir_attr->rx_hash_field_selector_inner :
#endif
				&tir_attr->rx_hash_field_selector_outer;
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
			!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
			!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
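		/*
		 * Worked example: for a TCP/IPv4 4-tuple hash the IPv6 and
		 * UDP bits are clear, so l3_prot_type = 0 (IPv4) and
		 * l4_prot_type = 0 (TCP), while selected_fields has the
		 * SRC_IP, DST_IP, L4_SPORT and L4_DPORT bits set.
		 */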
	}
	if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr->transport_domain = priv->sh->td->id;
	else
		tir_attr->transport_domain = priv->sh->tdn;
	memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr->indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr->self_lb_block =
					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr->lro_enable_mask =
				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_tir_attr tir_attr = {0};
	int err;

	mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
			       hrxq->ind_table, tunnel, &tir_attr);
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->cdev->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
							  &hrxq->action)) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its tir.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		      const uint8_t *rss_key,
		      uint64_t hash_fields,
		      const struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_devx_modify_tir_attr modify_tir = {0};

	/*
	 * Untested modification fields:
	 * - rx_hash_symmetric is not set in hrxq_new(),
	 * - rx_hash_fn is hard-coded in hrxq_new(),
	 * - lro_xxx are not set after rxq setup.
	 */
	if (ind_tbl != hrxq->ind_table)
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
	if (hash_fields != hrxq->hash_fields ||
	    memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
			       0, /* N/A - tunnel modification unsupported. */
			       &modify_tir.tir);
	modify_tir.tirn = hrxq->tir->id;
	if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
		DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	/* Assign new table to hrxq. */
	hrxq->ind_table = ind_tbl;
	return 0;
}

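/*
 * Note on the bitmask logic above: firmware applies only the attribute
 * fields whose bits are set in modify_bitmask, so an unchanged
 * indirection table or RSS key is simply left untouched by the command.
 */
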
/**
 * Create a DevX drop Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_drop_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int socket_id = dev->device->numa_node;
	struct mlx5_rxq_ctrl *rxq_ctrl;
	struct mlx5_rxq_data *rxq_data;
	struct mlx5_rxq_obj *rxq = NULL;
	int ret;

	/*
	 * Initialize dummy control structures.
	 * They are required to hold pointers for cleanup
	 * and are only accessible via drop queue DevX objects.
	 */
	rxq_ctrl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq_ctrl),
			       0, socket_id);
	if (rxq_ctrl == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue control",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rxq), 0, socket_id);
	if (rxq == NULL) {
		DRV_LOG(ERR, "Port %u could not allocate drop queue object",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	rxq->rxq_ctrl = rxq_ctrl;
	rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
	rxq_ctrl->priv = priv;
	rxq_ctrl->obj = rxq;
	rxq_data = &rxq_ctrl->rxq;
	/* Create CQ using DevX API. */
	ret = mlx5_rxq_create_devx_cq_resources(dev, rxq_data);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue CQ creation failed.",
			dev->data->port_id);
		goto error;
	}
	/* Create RQ using DevX API. */
	ret = mlx5_rxq_create_devx_rq_resources(dev, rxq_data);
	if (ret != 0) {
		DRV_LOG(ERR, "Port %u drop queue RQ creation failed.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(rxq, MLX5_RXQ_MOD_RST2RDY);
	if (ret != 0)
		goto error;
	/* Initialize drop queue. */
	priv->drop_queue.rxq = rxq;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (rxq != NULL) {
		if (rxq->rq_obj.rq != NULL)
			mlx5_devx_rq_destroy(&rxq->rq_obj);
		if (rxq->cq_obj.cq != NULL)
			mlx5_devx_cq_destroy(&rxq->cq_obj);
		if (rxq->devx_channel)
			mlx5_os_devx_destroy_event_channel
							(rxq->devx_channel);
		mlx5_free(rxq);
	}
	if (rxq_ctrl != NULL)
		mlx5_free(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Release drop Rx queue resources.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_rxq_devx_obj_drop_release(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_obj *rxq = priv->drop_queue.rxq;
	struct mlx5_rxq_ctrl *rxq_ctrl = rxq->rxq_ctrl;

	mlx5_rxq_devx_obj_release(rxq);
	mlx5_free(rxq);
	mlx5_free(rxq_ctrl);
	priv->drop_queue.rxq = NULL;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;

	if (hrxq->tir != NULL)
		mlx5_devx_tir_destroy(hrxq);
	if (hrxq->ind_table->ind_table != NULL)
		mlx5_devx_ind_table_destroy(hrxq->ind_table);
	if (priv->drop_queue.rxq->rq != NULL)
		mlx5_rxq_devx_obj_drop_release(dev);
}

/**
 * Create a DevX drop action for an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
	int ret;

	ret = mlx5_rxq_devx_obj_drop_create(dev);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop RX queue");
		return ret;
	}
	/* hrxq->ind_table queues are NULL, drop RX queue ID will be used. */
	ret = mlx5_devx_ind_table_new(dev, 0, hrxq->ind_table);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop hash RX queue indirection table");
		goto error;
	}
	ret = mlx5_devx_hrxq_new(dev, hrxq, /* tunnel */ false);
	if (ret != 0) {
		DRV_LOG(ERR, "Cannot create drop hash RX queue");
		goto error;
	}
	return 0;
error:
	mlx5_devx_drop_action_destroy(dev);
	return ret;
}

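/*
 * The drop path is assembled bottom-up: a real (but unused) Rx queue,
 * then a single-entry indirection table pointing at it (queues == NULL
 * makes mlx5_devx_ind_table_create_rqt_attr() use the drop RQ id), and
 * finally a TIR that flows can reference as a drop action.
 */
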
/**
 * Select TXQ TIS number.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queue_idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   > 0 on success, a negative errno value otherwise.
 */
static uint32_t
mlx5_get_txq_tis_num(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	int tis_idx;

	if (priv->sh->bond.n_port && priv->sh->lag.affinity_mode ==
	    MLX5_LAG_MODE_TIS) {
		tis_idx = (priv->lag_affinity_idx + queue_idx) %
			  priv->sh->bond.n_port;
		DRV_LOG(INFO, "port %d txq %d gets affinity %d and maps to PF %d.",
			dev->data->port_id, queue_idx, tis_idx + 1,
			priv->sh->lag.tx_remap_affinity[tis_idx]);
	} else {
		tis_idx = 0;
	}
	MLX5_ASSERT(priv->sh->tis[tis_idx]);
	return priv->sh->tis[tis_idx]->id;
}

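/*
 * Example (hypothetical bond): with n_port = 2, lag_affinity_idx = 1 and
 * queue_idx = 2, tis_idx = (1 + 2) % 2 = 1, so the queue is pinned to
 * the TIS of the second physical port; without TIS LAG affinity every
 * queue uses tis[0].
 */
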
/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(tmpl);
	tmpl->txq_ctrl = txq_ctrl;
	attr.hairpin = 1;
	attr.tis_lst_sz = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.tis_num = mlx5_get_txq_tis_num(dev, idx);
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->cdev->ctx, &attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u Tx hairpin queue %u cannot create SQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
	mlx5_devx_sq_destroy(&txq_obj->sq_obj);
	memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj));
	mlx5_devx_cq_destroy(&txq_obj->cq_obj);
	memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
}

/**
 * Create an SQ object and its resources using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
				  uint16_t log_desc_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_common_device *cdev = priv->sh->cdev;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_create_sq_attr sq_attr = {
		.flush_in_error_en = 1,
		.allow_multi_pkt_send_wqe = !!priv->config.mps,
		.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode,
		.allow_swp = !!priv->config.swp,
		.cqn = txq_obj->cq_obj.cq->id,
		.tis_lst_sz = 1,
		.wq_attr = (struct mlx5_devx_wq_attr){
			.pd = cdev->pdn,
			.uar_page =
				mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
		},
		.ts_format =
			mlx5_ts_format_conv(cdev->config.hca_attr.sq_ts_format),
		.tis_num = mlx5_get_txq_tis_num(dev, idx),
	};

	/* Create Send Queue object with DevX. */
	return mlx5_devx_sq_create(cdev->ctx, &txq_obj->sq_obj,
				   log_desc_n, &sq_attr, priv->sh->numa_node);
}

/**
 * Create the Tx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
			container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
		return mlx5_txq_obj_hairpin_new(dev, idx);
#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
		dev->data->port_id, idx);
	rte_errno = ENOMEM;
	return -rte_errno;
#else
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
	};
	void *reg_addr;
	uint32_t cqe_n, log_desc_n;
	uint32_t wqe_n, wqe_size;
	int ret = 0;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	txq_obj->txq_ctrl = txq_ctrl;
	txq_obj->dev = dev;
	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	log_desc_n = log2above(cqe_n);
	cqe_n = 1UL << log_desc_n;
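	/*
	 * Worked example (default thresholds assumed): elts_n = 10 gives
	 * 1024 elements; with MLX5_TX_COMP_THRESH = 32 and
	 * MLX5_TX_COMP_THRESH_INLINE_DIV = 8 this requests
	 * 1024 / 32 + 1 + 8 = 41 CQEs, which log2above() rounds up to a
	 * 64-entry CQ.
	 */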
	if (cqe_n > UINT16_MAX) {
		DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
			dev->data->port_id, txq_data->idx, cqe_n);
		rte_errno = EINVAL;
		return -rte_errno;
	}
	/* Create completion queue object with DevX. */
	ret = mlx5_devx_cq_create(sh->cdev->ctx, &txq_obj->cq_obj, log_desc_n,
				  &cq_attr, priv->sh->numa_node);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		goto error;
	}
	txq_data->cqe_n = log_desc_n;
	txq_data->cqe_s = cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->cqes = txq_obj->cq_obj.cqes;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->cq_db = txq_obj->cq_obj.db_rec;
	*txq_data->cq_db = 0;
	/*
	 * Adjust the amount of WQEs depending on inline settings.
	 * The number of descriptors should be enough to handle
	 * the specified number of packets. If the queue is created
	 * with Verbs, rdma-core does this queue size adjustment
	 * internally in mlx5_calc_sq_size(); do the same here for
	 * the queue created with DevX.
	 */
	wqe_size = txq_data->tso_en ?
		   RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
	wqe_size += sizeof(struct mlx5_wqe_cseg) +
		    sizeof(struct mlx5_wqe_eseg) +
		    sizeof(struct mlx5_wqe_dseg);
	if (txq_data->inlen_send)
		wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
					     sizeof(struct mlx5_wqe_eseg) +
					     RTE_ALIGN(txq_data->inlen_send +
						       sizeof(uint32_t),
						       MLX5_WSEG_SIZE));
	wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
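	/*
	 * Example continuation (assumed sizes): with TSO disabled and no
	 * data inlining, the WQE is one control, one Ethernet and one data
	 * segment of MLX5_WSEG_SIZE (16B) each, i.e. 48B, which rounds up
	 * to a single 64B MLX5_WQE_SIZE unit per descriptor.
	 */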
	/* Create Send Queue object with DevX. */
	wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
			(uint32_t)priv->sh->device_attr.max_qp_wr);
	log_desc_n = log2above(wqe_n);
	ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
	if (ret) {
		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = errno;
		goto error;
	}
	/* Create the Work Queue. */
	txq_data->wqe_n = log_desc_n;
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
	txq_data->qp_db = &txq_obj->sq_obj.db_rec[MLX5_SND_DBR];
	*txq_data->qp_db = 0;
	txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
	/* Change Send Queue state to Ready-to-Send. */
	ret = mlx5_txq_devx_modify(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
			dev->data->port_id, idx);
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/*
	 * When using DevX, the TIS transport domain value must be queried
	 * and stored once per port; it is later used on the Rx side when
	 * creating a matching TIR.
	 */
	if (!priv->sh->tdn)
		priv->sh->tdn = priv->sh->td->id;
#endif
	MLX5_ASSERT(sh->tx_uar);
	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
	MLX5_ASSERT(reg_addr);
	txq_ctrl->bf_reg = reg_addr;
	txq_ctrl->uar_mmap_offset =
			mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
	txq_uar_init(txq_ctrl);
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_txq_release_devx_resources(txq_obj);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
#endif
}

/**
 * Release a Tx DevX queue object.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
void
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
	MLX5_ASSERT(txq_obj);
	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
		if (txq_obj->tis)
			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
	} else {
		mlx5_txq_release_devx_resources(txq_obj);
#endif
	}
}

struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_modify = mlx5_devx_ind_table_modify,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.hrxq_modify = mlx5_devx_hrxq_modify,
	.drop_action_create = mlx5_devx_drop_action_create,
	.drop_action_destroy = mlx5_devx_drop_action_destroy,
	.txq_obj_new = mlx5_txq_devx_obj_new,
	.txq_obj_modify = mlx5_txq_devx_modify,
	.txq_obj_release = mlx5_txq_devx_obj_release,
	.lb_dummy_queue_create = NULL,
	.lb_dummy_queue_release = NULL,
};

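/*
 * This ops table is the DevX counterpart of the Verbs one (presumably
 * ibv_obj_ops in mlx5_verbs.c); the PMD selects one of the two at device
 * configuration time and all queue/RSS object management goes through
 * these callbacks, which is why mlx5_txq_devx_modify() must keep the
 * same signature as its Verbs twin (see the dev_port note above).
 */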