/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
/**
 * Modify RQ vlan stripping offload.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
        struct mlx5_devx_modify_rq_attr rq_attr;

        memset(&rq_attr, 0, sizeof(rq_attr));
        rq_attr.rq_state = MLX5_RQC_STATE_RDY;
        rq_attr.state = MLX5_RQC_STATE_RDY;
        rq_attr.vsd = (on ? 0 : 1);
        rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
        return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}
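/*
 * The "vsd" field stands for VLAN Strip Disable in the device PRM, hence
 * the inversion above. This helper is exported through the
 * rxq_obj_modify_vlan_strip callback of devx_obj_ops at the end of this
 * file and is expected to be reached from the ethdev VLAN offload path.
 */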
/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param type
 *   Type of change queue state.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
        struct mlx5_devx_modify_rq_attr rq_attr;

        memset(&rq_attr, 0, sizeof(rq_attr));
        switch (type) {
        case MLX5_RXQ_MOD_ERR2RST:
                rq_attr.rq_state = MLX5_RQC_STATE_ERR;
                rq_attr.state = MLX5_RQC_STATE_RST;
                break;
        case MLX5_RXQ_MOD_RST2RDY:
                rq_attr.rq_state = MLX5_RQC_STATE_RST;
                rq_attr.state = MLX5_RQC_STATE_RDY;
                break;
        case MLX5_RXQ_MOD_RDY2ERR:
                rq_attr.rq_state = MLX5_RQC_STATE_RDY;
                rq_attr.state = MLX5_RQC_STATE_ERR;
                break;
        case MLX5_RXQ_MOD_RDY2RST:
                rq_attr.rq_state = MLX5_RQC_STATE_RDY;
                rq_attr.state = MLX5_RQC_STATE_RST;
                break;
        default:
                break;
        }
        return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}
/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of change queue state.
 * @param dev_port
 *   Unused, kept for prototype compatibility with the Verbs implementation.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
                    uint8_t dev_port)
{
        struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
        int ret;

        if (type != MLX5_TXQ_MOD_RST2RDY) {
                /* Change queue state to reset. */
                if (type == MLX5_TXQ_MOD_ERR2RDY)
                        msq_attr.sq_state = MLX5_SQC_STATE_ERR;
                else
                        msq_attr.sq_state = MLX5_SQC_STATE_RDY;
                msq_attr.state = MLX5_SQC_STATE_RST;
                ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
                if (ret) {
                        DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
                                " %s", strerror(errno));
                        rte_errno = errno;
                        return ret;
                }
        }
        if (type != MLX5_TXQ_MOD_RDY2RST) {
                /* Change queue state to ready. */
                msq_attr.sq_state = MLX5_SQC_STATE_RST;
                msq_attr.state = MLX5_SQC_STATE_RDY;
                ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
                if (ret) {
                        DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
                                " %s", strerror(errno));
                        rte_errno = errno;
                        return ret;
                }
        }
        /*
         * The dev_port variable is relevant only in the Verbs API; the same
         * function pointer may refer either to this function or to its Verbs
         * counterpart, so both must keep the same parameter list.
         */
        (void)dev_port;
        return 0;
}
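/*
 * Illustrative recovery sequence (a sketch, not code called from this
 * file): a Tx queue stuck in error can be returned to service with one
 * call, since ERR2RDY first drops the SQ to RST and then raises it to RDY:
 *
 *      if (mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_ERR2RDY, 0))
 *              DRV_LOG(ERR, "SQ recovery failed: %s", strerror(rte_errno));
 */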
/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue control structure.
 */
static void
mlx5_rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

        if (rxq_ctrl->wq_umem) {
                mlx5_os_umem_dereg(rxq_ctrl->wq_umem);
                rxq_ctrl->wq_umem = NULL;
        }
        if (rxq_ctrl->rxq.wqes) {
                mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
                rxq_ctrl->rxq.wqes = NULL;
        }
        if (dbr_page) {
                claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
                                            mlx5_os_get_umem_id(dbr_page->umem),
                                            rxq_ctrl->rq_dbr_offset));
                rxq_ctrl->rq_dbrec_page = NULL;
        }
}
/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue control structure.
 */
static void
mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

        if (rxq_ctrl->cq_umem) {
                mlx5_os_umem_dereg(rxq_ctrl->cq_umem);
                rxq_ctrl->cq_umem = NULL;
        }
        if (rxq_ctrl->rxq.cqes) {
                rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
                rxq_ctrl->rxq.cqes = NULL;
        }
        if (dbr_page) {
                claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
                                            mlx5_os_get_umem_id(dbr_page->umem),
                                            rxq_ctrl->cq_dbr_offset));
                rxq_ctrl->cq_dbrec_page = NULL;
        }
}
/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
        MLX5_ASSERT(rxq_obj);
        MLX5_ASSERT(rxq_obj->rq);
        if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
                mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
        } else {
                MLX5_ASSERT(rxq_obj->devx_cq);
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
                if (rxq_obj->devx_channel)
                        mlx5_os_devx_destroy_event_channel
                                                        (rxq_obj->devx_channel);
                mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
                mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
        }
}
/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
                                            &out.event_resp,
                                            sizeof(out.buf));

        if (ret < 0) {
                rte_errno = errno;
                return -rte_errno;
        }
        /* The event must carry the cookie of this Rx queue's CQ. */
        if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
        return 0;
#else
        (void)rxq_obj;
        rte_errno = ENOTSUP;
        return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}
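/*
 * Expected usage (a sketch; the interrupt plumbing itself lives outside
 * this file): the application waits on the event channel file descriptor
 * stored in the Rx queue object, e.g. with epoll, then drains the
 * completion event through the rxq_event_get callback before re-arming
 * the CQ.
 */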
/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
                              struct mlx5_devx_create_rq_attr *rq_attr)
{
        rq_attr->state = MLX5_RQC_STATE_RST;
        rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
        rq_attr->cqn = cqn;
        rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}
/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
                       struct mlx5_devx_wq_attr *wq_attr)
{
        wq_attr->end_padding_mode = priv->config.hw_padding ?
                                        MLX5_WQ_END_PAD_MODE_ALIGN :
                                        MLX5_WQ_END_PAD_MODE_NONE;
        wq_attr->pd = priv->sh->pdn;
        wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
        wq_attr->dbr_umem_id =
                        mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
        wq_attr->dbr_umem_valid = 1;
        wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
        wq_attr->wq_umem_valid = 1;
}
/**
 * Create a RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_devx_create_rq_attr rq_attr = { 0 };
        uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
        uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
        struct mlx5_devx_dbr_page *dbr_page;
        int64_t dbr_offset;
        uint32_t wq_size = 0;
        uint32_t wqe_size = 0;
        uint32_t log_wqe_size = 0;
        void *buf = NULL;
        struct mlx5_devx_obj *rq;

        /* Fill RQ attributes. */
        rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
        rq_attr.flush_in_error_en = 1;
        mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
        /* Fill WQ attributes for this RQ. */
        if (mlx5_rxq_mprq_enabled(rxq_data)) {
                rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
                /*
                 * Number of strides in each WQE:
                 * 512*2^single_wqe_log_num_of_strides.
                 */
                rq_attr.wq_attr.single_wqe_log_num_of_strides =
                                rxq_data->strd_num_n -
                                MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
                /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
                rq_attr.wq_attr.single_stride_log_num_of_bytes =
                                rxq_data->strd_sz_n -
                                MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
                wqe_size = sizeof(struct mlx5_wqe_mprq);
        } else {
                rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
                wqe_size = sizeof(struct mlx5_wqe_data_seg);
        }
        log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
        rq_attr.wq_attr.log_wq_stride = log_wqe_size;
        rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
        /* Calculate and allocate WQ memory space. */
        wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
        wq_size = wqe_n * wqe_size;
        /* MLX5_WQE_BUF_ALIGNMENT resolves to the memory page size. */
        size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
        if (alignment == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get mem page size.");
                rte_errno = ENOMEM;
                return NULL;
        }
        buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
                          alignment, rxq_ctrl->socket);
        if (!buf)
                return NULL;
        rxq_data->wqes = buf;
        rxq_ctrl->wq_umem = mlx5_os_umem_reg(priv->sh->ctx,
                                             buf, wq_size, 0);
        if (!rxq_ctrl->wq_umem)
                goto error;
        /* Allocate RQ door-bell. */
        dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
        if (dbr_offset < 0) {
                DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
                goto error;
        }
        rxq_ctrl->rq_dbr_offset = dbr_offset;
        rxq_ctrl->rq_dbrec_page = dbr_page;
        rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
                          (uintptr_t)rxq_ctrl->rq_dbr_offset);
        /* Create RQ using DevX API. */
        mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
        rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
        if (!rq)
                goto error;
        return rq;
error:
        mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
        return NULL;
}
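/*
 * Sizing illustration (example numbers only): with elts_n = 10 and
 * sges_n = 0 the ring holds 1024 single-segment WQEs; a data segment is
 * 16 bytes, so log_wqe_size = 4 and wq_size = 1024 * 16 = 16KiB, allocated
 * above as one zeroed buffer and registered as a single umem.
 */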
/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_devx_obj *cq_obj = NULL;
        struct mlx5_devx_cq_attr cq_attr = { 0 };
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        size_t page_size = rte_mem_page_size();
        unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
        struct mlx5_devx_dbr_page *dbr_page;
        int64_t dbr_offset;
        void *buf = NULL;
        uint16_t event_nums[1] = {0};
        uint32_t log_cqe_n;
        uint32_t cq_size;
        int ret = 0;

        if (page_size == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get page_size.");
                goto error;
        }
        if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
            !rxq_data->lro) {
                cq_attr.cqe_comp_en = 1u;
                rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
                rxq_data->byte_mask = UINT32_MAX;
                switch (priv->config.cqe_comp_fmt) {
                case MLX5_CQE_RESP_FORMAT_HASH:
                        /* fallthrough */
                case MLX5_CQE_RESP_FORMAT_CSUM:
                        /*
                         * Select CSUM miniCQE format only for non-vectorized
                         * MPRQ Rx burst, use HASH miniCQE format for others.
                         */
                        if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
                            mlx5_rxq_mprq_enabled(rxq_data))
                                cq_attr.mini_cqe_res_format =
                                        MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
                        else
                                cq_attr.mini_cqe_res_format =
                                        MLX5_CQE_RESP_FORMAT_HASH;
                        rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
                        break;
                case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
                        rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
                        /* fallthrough */
                case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
                        cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
                        break;
                case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
                        cq_attr.mini_cqe_res_format = 0;
                        cq_attr.mini_cqe_res_format_ext = 1;
                        break;
                }
                DRV_LOG(DEBUG,
                        "Port %u Rx CQE compression is enabled, format %d.",
                        dev->data->port_id, priv->config.cqe_comp_fmt);
                /*
                 * For vectorized Rx, it must not be doubled in order to
                 * make cq_ci and rq_ci aligned.
                 */
                if (mlx5_rxq_check_vec_support(rxq_data) < 0)
                        cqe_n *= 2;
        } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
                DRV_LOG(DEBUG,
                        "Port %u Rx CQE compression is disabled for HW"
                        " timestamp.",
                        dev->data->port_id);
        } else if (priv->config.cqe_comp && rxq_data->lro) {
                DRV_LOG(DEBUG,
                        "Port %u Rx CQE compression is disabled for LRO.",
                        dev->data->port_id);
        }
        log_cqe_n = log2above(cqe_n);
        cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
        buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
                                rxq_ctrl->socket);
        if (!buf) {
                DRV_LOG(ERR, "Failed to allocate memory for CQ.");
                goto error;
        }
        rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
        rxq_ctrl->cq_umem = mlx5_os_umem_reg(priv->sh->ctx, buf,
                                             cq_size,
                                             IBV_ACCESS_LOCAL_WRITE);
        if (!rxq_ctrl->cq_umem) {
                DRV_LOG(ERR, "Failed to register umem for CQ.");
                goto error;
        }
        /* Allocate CQ door-bell. */
        dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
        if (dbr_offset < 0) {
                DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
                goto error;
        }
        rxq_ctrl->cq_dbr_offset = dbr_offset;
        rxq_ctrl->cq_dbrec_page = dbr_page;
        rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
                          (uintptr_t)rxq_ctrl->cq_dbr_offset);
        rxq_data->cq_uar =
                        mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
        /* Create CQ using DevX API. */
        cq_attr.eqn = priv->sh->eqn;
        cq_attr.uar_page_id =
                        mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
        cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
        cq_attr.q_umem_valid = 1;
        cq_attr.log_cq_size = log_cqe_n;
        cq_attr.log_page_size = rte_log2_u32(page_size);
        cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
        cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
        cq_attr.db_umem_valid = 1;
        cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
        if (!cq_obj)
                goto error;
        rxq_data->cqe_n = log_cqe_n;
        rxq_data->cqn = cq_obj->id;
        if (rxq_ctrl->obj->devx_channel) {
                ret = mlx5_os_devx_subscribe_devx_event
                                                (rxq_ctrl->obj->devx_channel,
                                                 cq_obj->obj,
                                                 sizeof(event_nums),
                                                 event_nums,
                                                 (uint64_t)(uintptr_t)cq_obj);
                if (ret) {
                        DRV_LOG(ERR,
                                "Failed to subscribe CQ to event channel.");
                        rte_errno = errno;
                        goto error;
                }
        }
        /* Initialise CQ to 1's to mark HW ownership for all CQEs. */
        memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
        return cq_obj;
error:
        if (cq_obj)
                mlx5_devx_cmd_destroy(cq_obj);
        mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
        return NULL;
}
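/*
 * Note on the 0xFF fill above: hardware and software alternate CQE
 * ownership through an owner bit that flips on every wrap of the CQ, so
 * writing all ones marks every entry as hardware-owned and the first
 * polling pass stops immediately until the NIC writes real completions.
 */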
/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_devx_create_rq_attr attr = { 0 };
        struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
        uint32_t max_wq_data;

        MLX5_ASSERT(rxq_data);
        MLX5_ASSERT(tmpl);
        tmpl->rxq_ctrl = rxq_ctrl;
        attr.hairpin = 1;
        max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
        /* Jumbo frames > 9KB should be supported, and more packets. */
        if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
                if (priv->config.log_hp_size > max_wq_data) {
                        DRV_LOG(ERR, "Total data size %u power of 2 is "
                                "too large for hairpin.",
                                priv->config.log_hp_size);
                        rte_errno = ERANGE;
                        return -rte_errno;
                }
                attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
        } else {
                attr.wq_attr.log_hairpin_data_sz =
                                (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
                                 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
        }
        /* Set the packets number to the maximum value for performance. */
        attr.wq_attr.log_hairpin_num_packets =
                        attr.wq_attr.log_hairpin_data_sz -
                        MLX5_HAIRPIN_QUEUE_STRIDE;
        tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
                                           rxq_ctrl->socket);
        if (!tmpl->rq) {
                DRV_LOG(ERR,
                        "Port %u Rx hairpin queue %u can't create RQ object.",
                        dev->data->port_id, idx);
                rte_errno = errno;
                return -rte_errno;
        }
        dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
        return 0;
}
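/*
 * The log subtraction above encodes a division: the hairpin WQ buffer holds
 * 2^log_hairpin_data_sz bytes and each packet slot occupies
 * 2^MLX5_HAIRPIN_QUEUE_STRIDE bytes, so the maximum packet count is their
 * quotient, 2^(log_hairpin_data_sz - MLX5_HAIRPIN_QUEUE_STRIDE).
 */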
/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
        int ret = 0;

        MLX5_ASSERT(rxq_data);
        MLX5_ASSERT(tmpl);
        if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
                return mlx5_rxq_obj_hairpin_new(dev, idx);
        tmpl->rxq_ctrl = rxq_ctrl;
        if (rxq_ctrl->irq) {
                int devx_ev_flag =
                        MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

                tmpl->devx_channel = mlx5_os_devx_create_event_channel
                                                                (priv->sh->ctx,
                                                                 devx_ev_flag);
                if (!tmpl->devx_channel) {
                        rte_errno = errno;
                        DRV_LOG(ERR, "Failed to create event channel %d.",
                                rte_errno);
                        goto error;
                }
                tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
        }
        /* Create CQ using DevX API. */
        tmpl->devx_cq = mlx5_rxq_create_devx_cq_resources(dev, idx);
        if (!tmpl->devx_cq) {
                DRV_LOG(ERR, "Failed to create CQ.");
                goto error;
        }
        /* Create RQ using DevX API. */
        tmpl->rq = mlx5_rxq_create_devx_rq_resources(dev, idx);
        if (!tmpl->rq) {
                DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
                        dev->data->port_id, idx);
                rte_errno = ENOMEM;
                goto error;
        }
        /* Change queue state to ready. */
        ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
        if (ret)
                goto error;
        rxq_data->cq_arm_sn = 0;
        mlx5_rxq_initialize(rxq_data);
        rxq_data->cq_ci = 0;
        dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        rxq_ctrl->wqn = tmpl->rq->id;
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        if (tmpl->rq)
                claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
        if (tmpl->devx_cq)
                claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
        if (tmpl->devx_channel)
                mlx5_os_devx_destroy_event_channel(tmpl->devx_channel);
        mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
        mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}
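/*
 * Ordering note: the CQ is created before the RQ because
 * mlx5_rxq_create_devx_rq_resources() reads the CQ number from
 * rxq_ctrl->obj->devx_cq when filling the RQ context; the error path above
 * tears the objects down in the reverse order.
 */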
/**
 * Prepare RQT attribute structure for DevX RQT API.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   Array of Rx queue indices.
 * @param queues_n
 *   Number of entries in @p queues.
 *
 * @return
 *   The RQT attr object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_rqt_attr *
mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
                                    const unsigned int log_n,
                                    const uint16_t *queues,
                                    const uint32_t queues_n)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_devx_rqt_attr *rqt_attr = NULL;
        const unsigned int rqt_n = 1 << log_n;
        unsigned int i = 0, j;

        rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
                               rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
        if (!rqt_attr) {
                DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
                        dev->data->port_id);
                rte_errno = ENOMEM;
                return NULL;
        }
        rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
        rqt_attr->rqt_actual_size = rqt_n;
        for (i = 0; i != queues_n; ++i) {
                struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                                container_of(rxq, struct mlx5_rxq_ctrl, rxq);

                rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
        }
        MLX5_ASSERT(i > 0);
        /* Pad the remaining slots by cycling through the given queues. */
        for (j = 0; i != rqt_n; ++j, ++i)
                rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
        return rqt_attr;
}
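/*
 * Padding example: queues_n = 3 with log_n = 2 gives rqt_n = 4, so the loop
 * above wraps and produces rq_list = {q0, q1, q2, q0}; every RQT slot must
 * hold a valid RQ number even when the queue count is not a power of two.
 */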
/**
 * Create RQT using DevX API as a field of the indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
                        struct mlx5_ind_table_obj *ind_tbl)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_devx_rqt_attr *rqt_attr = NULL;

        MLX5_ASSERT(ind_tbl);
        rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
                                                       ind_tbl->queues,
                                                       ind_tbl->queues_n);
        if (!rqt_attr)
                return -rte_errno;
        ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
        mlx5_free(rqt_attr);
        if (!ind_tbl->rqt) {
                DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
                        dev->data->port_id);
                rte_errno = errno;
                return -rte_errno;
        }
        return 0;
}
/**
 * Modify RQT using DevX API as a field of the indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   Array of Rx queue indices.
 * @param queues_n
 *   Number of entries in @p queues.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
                           const uint16_t *queues, const uint32_t queues_n,
                           struct mlx5_ind_table_obj *ind_tbl)
{
        int ret = 0;
        struct mlx5_devx_rqt_attr *rqt_attr = NULL;

        MLX5_ASSERT(ind_tbl);
        rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
                                                       queues,
                                                       queues_n);
        if (!rqt_attr)
                return -rte_errno;
        ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
        mlx5_free(rqt_attr);
        if (ret) {
                DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
                        dev->data->port_id);
                rte_errno = errno;
        }
        return ret;
}
/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
        claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}
/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
                       uint64_t hash_fields,
                       const struct mlx5_ind_table_obj *ind_tbl,
                       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
        bool lro = true;
        uint32_t i;

        /* Enable TIR LRO only if all the queues were configured for it. */
        for (i = 0; i < ind_tbl->queues_n; ++i) {
                if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
                        lro = false;
                        break;
                }
        }
        memset(tir_attr, 0, sizeof(*tir_attr));
        tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
        tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
        tir_attr->tunneled_offload_en = !!tunnel;
        /* If needed, translate hash_fields bitmap to PRM format. */
        if (hash_fields) {
                struct mlx5_rx_hash_field_select *rx_hash_field_select =
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
                        hash_fields & IBV_RX_HASH_INNER ?
                        &tir_attr->rx_hash_field_selector_inner :
#endif
                        &tir_attr->rx_hash_field_selector_outer;
                /* 1 bit: 0: IPv4, 1: IPv6. */
                rx_hash_field_select->l3_prot_type =
                        !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
                /* 1 bit: 0: TCP, 1: UDP. */
                rx_hash_field_select->l4_prot_type =
                        !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
                /* Bitmask which sets which fields to use in RX Hash. */
                rx_hash_field_select->selected_fields =
                        ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
                        (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
                        (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
                        (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
        }
        if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
                tir_attr->transport_domain = priv->sh->td->id;
        else
                tir_attr->transport_domain = priv->sh->tdn;
        memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
        tir_attr->indirect_table = ind_tbl->rqt->id;
        if (dev->data->dev_conf.lpbk_mode)
                tir_attr->self_lb_block =
                                MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
        if (lro) {
                tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
                tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
                tir_attr->lro_enable_mask =
                                MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                                MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
        }
}
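/*
 * Translation example: non-tunneled IPv4/TCP RSS over the full 4-tuple
 * yields l3_prot_type = 0 (IPv4), l4_prot_type = 0 (TCP) and
 * selected_fields with the SRC_IP, DST_IP, L4_SPORT and L4_DPORT bits set
 * in the outer hash field selector.
 */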
/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                   int tunnel __rte_unused)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_devx_tir_attr tir_attr = {0};
        int err;

        mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
                               hrxq->ind_table, tunnel, &tir_attr);
        hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
        if (!hrxq->tir) {
                DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
                        dev->data->port_id);
                rte_errno = errno;
                goto error;
        }
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
                                                          &hrxq->action)) {
                rte_errno = errno;
                goto error;
        }
#endif
        return 0;
error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        if (hrxq->tir)
                claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
}
/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its tir.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
        claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}
/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                      const uint8_t *rss_key,
                      uint64_t hash_fields,
                      const struct mlx5_ind_table_obj *ind_tbl)
{
        struct mlx5_devx_modify_tir_attr modify_tir = {0};

        /*
         * Untested for these modification fields:
         * - rx_hash_symmetric is not set in hrxq_new(),
         * - rx_hash_fn is hard-coded in hrxq_new(),
         * - lro_xxx is not set after rxq setup.
         */
        if (ind_tbl != hrxq->ind_table)
                modify_tir.modify_bitmask |=
                        MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
        if (hash_fields != hrxq->hash_fields ||
            memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
                modify_tir.modify_bitmask |=
                        MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
        mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
                               0, /* N/A - tunnel modification unsupported */
                               &modify_tir.tir);
        modify_tir.tirn = hrxq->tir->id;
        if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
                DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
                        dev->data->port_id);
                rte_errno = errno;
                return -rte_errno;
        }
        return 0;
}
/**
 * Create a DevX drop action for Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
        (void)dev;
        DRV_LOG(ERR, "DevX drop action is not supported yet.");
        rte_errno = ENOTSUP;
        return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
        (void)dev;
        DRV_LOG(ERR, "DevX drop action is not supported yet.");
        rte_errno = ENOTSUP;
}
/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                container_of(txq_data, struct mlx5_txq_ctrl, txq);
        struct mlx5_devx_create_sq_attr attr = { 0 };
        struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
        uint32_t max_wq_data;

        MLX5_ASSERT(txq_data);
        MLX5_ASSERT(tmpl);
        tmpl->txq_ctrl = txq_ctrl;
        attr.hairpin = 1;
        attr.tis_lst_sz = 1;
        max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
        /* Jumbo frames > 9KB should be supported, and more packets. */
        if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
                if (priv->config.log_hp_size > max_wq_data) {
                        DRV_LOG(ERR, "Total data size %u power of 2 is "
                                "too large for hairpin.",
                                priv->config.log_hp_size);
                        rte_errno = ERANGE;
                        return -rte_errno;
                }
                attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
        } else {
                attr.wq_attr.log_hairpin_data_sz =
                                (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
                                 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
        }
        /* Set the packets number to the maximum value for performance. */
        attr.wq_attr.log_hairpin_num_packets =
                        attr.wq_attr.log_hairpin_data_sz -
                        MLX5_HAIRPIN_QUEUE_STRIDE;
        attr.tis_num = priv->sh->tis->id;
        tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
        if (!tmpl->sq) {
                DRV_LOG(ERR,
                        "Port %u Tx hairpin queue %u can't create SQ object.",
                        dev->data->port_id, idx);
                rte_errno = errno;
                return -rte_errno;
        }
        dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
        return 0;
}
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
/**
 * Release DevX SQ resources.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
static void
mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
{
        if (txq_obj->sq_devx) {
                claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx));
                txq_obj->sq_devx = NULL;
        }
        if (txq_obj->sq_umem) {
                claim_zero(mlx5_os_umem_dereg(txq_obj->sq_umem));
                txq_obj->sq_umem = NULL;
        }
        if (txq_obj->sq_buf) {
                mlx5_free(txq_obj->sq_buf);
                txq_obj->sq_buf = NULL;
        }
        if (txq_obj->sq_dbrec_page) {
                claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
                                            mlx5_os_get_umem_id
                                                (txq_obj->sq_dbrec_page->umem),
                                            txq_obj->sq_dbrec_offset));
                txq_obj->sq_dbrec_page = NULL;
        }
}

/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
        mlx5_txq_release_devx_sq_resources(txq_obj);
        mlx5_devx_cq_destroy(&txq_obj->cq_obj);
        memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
}
/**
 * Create a SQ object and its resources using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   Number of WQEs in SQ, 0 otherwise and rte_errno is set.
 */
static uint32_t
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                        container_of(txq_data, struct mlx5_txq_ctrl, txq);
        struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
        struct mlx5_devx_create_sq_attr sq_attr = { 0 };
        size_t page_size;
        uint32_t wqe_n;
        int ret;

        MLX5_ASSERT(txq_data);
        MLX5_ASSERT(txq_obj);
        page_size = rte_mem_page_size();
        if (page_size == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get mem page size.");
                rte_errno = ENOMEM;
                return 0;
        }
        wqe_n = RTE_MIN(1UL << txq_data->elts_n,
                        (uint32_t)priv->sh->device_attr.max_qp_wr);
        txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
                                      wqe_n * sizeof(struct mlx5_wqe),
                                      page_size, priv->sh->numa_node);
        if (!txq_obj->sq_buf) {
                DRV_LOG(ERR,
                        "Port %u Tx queue %u cannot allocate memory (SQ).",
                        dev->data->port_id, txq_data->idx);
                rte_errno = ENOMEM;
                goto error;
        }
        /* Register allocated buffer in user space with DevX. */
        txq_obj->sq_umem = mlx5_os_umem_reg(priv->sh->ctx,
                                            (void *)txq_obj->sq_buf,
                                            wqe_n * sizeof(struct mlx5_wqe),
                                            IBV_ACCESS_LOCAL_WRITE);
        if (!txq_obj->sq_umem) {
                rte_errno = errno;
                DRV_LOG(ERR,
                        "Port %u Tx queue %u cannot register memory (SQ).",
                        dev->data->port_id, txq_data->idx);
                goto error;
        }
        /* Allocate doorbell record for send queue. */
        txq_obj->sq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
                                                &priv->dbrpgs,
                                                &txq_obj->sq_dbrec_page);
        if (txq_obj->sq_dbrec_offset < 0) {
                rte_errno = errno;
                DRV_LOG(ERR, "Failed to allocate SQ door-bell.");
                goto error;
        }
        sq_attr.tis_lst_sz = 1;
        sq_attr.tis_num = priv->sh->tis->id;
        sq_attr.state = MLX5_SQC_STATE_RST;
        sq_attr.cqn = txq_obj->cq_obj.cq->id;
        sq_attr.flush_in_error_en = 1;
        sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
        sq_attr.allow_swp = !!priv->config.swp;
        sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
        sq_attr.wq_attr.uar_page =
                        mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
        sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
        sq_attr.wq_attr.pd = priv->sh->pdn;
        sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
        sq_attr.wq_attr.log_wq_sz = log2above(wqe_n);
        sq_attr.wq_attr.dbr_umem_valid = 1;
        sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset;
        sq_attr.wq_attr.dbr_umem_id =
                        mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem);
        sq_attr.wq_attr.wq_umem_valid = 1;
        sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem);
        sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
        /* Create Send Queue object with DevX. */
        txq_obj->sq_devx = mlx5_devx_cmd_create_sq(priv->sh->ctx, &sq_attr);
        if (!txq_obj->sq_devx) {
                rte_errno = errno;
                DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
                        dev->data->port_id, idx);
                goto error;
        }
        return wqe_n;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        mlx5_txq_release_devx_sq_resources(txq_obj);
        rte_errno = ret; /* Restore rte_errno. */
        return 0;
}
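/*
 * Sizing note: the SQ length is the configured descriptor count clamped by
 * the device limit, e.g. elts_n = 16 requests 65536 WQEs but max_qp_wr may
 * cap it lower; the caller receives the actual count and derives its
 * wqe_s/wqe_m ring parameters from it.
 */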
/**
 * Create the Tx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                        container_of(txq_data, struct mlx5_txq_ctrl, txq);

        if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
                return mlx5_txq_obj_hairpin_new(dev, idx);
#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
        DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
                dev->data->port_id, idx);
        rte_errno = ENOMEM;
        return -rte_errno;
#else
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
        struct mlx5_devx_cq_attr cq_attr = {
                .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
        };
        void *reg_addr;
        uint32_t cqe_n, log_desc_n;
        uint32_t wqe_n;
        int ret = 0;

        MLX5_ASSERT(txq_data);
        MLX5_ASSERT(txq_obj);
        txq_obj->txq_ctrl = txq_ctrl;
        cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
                1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
        log_desc_n = log2above(cqe_n);
        cqe_n = 1UL << log_desc_n;
        if (cqe_n > UINT16_MAX) {
                DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
                        dev->data->port_id, txq_data->idx, cqe_n);
                rte_errno = EINVAL;
                return -rte_errno;
        }
        /* Create completion queue object with DevX. */
        ret = mlx5_devx_cq_create(sh->ctx, &txq_obj->cq_obj, log_desc_n,
                                  &cq_attr, priv->sh->numa_node);
        if (ret) {
                DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
                        dev->data->port_id, idx);
                goto error;
        }
        txq_data->cqe_n = log_desc_n;
        txq_data->cqe_s = cqe_n;
        txq_data->cqe_m = txq_data->cqe_s - 1;
        txq_data->cqes = txq_obj->cq_obj.cqes;
        txq_data->cq_ci = 0;
        txq_data->cq_pi = 0;
        txq_data->cq_db = txq_obj->cq_obj.db_rec;
        *txq_data->cq_db = 0;
        /* Create Send Queue object with DevX. */
        wqe_n = mlx5_txq_create_devx_sq_resources(dev, idx);
        if (!wqe_n) {
                rte_errno = errno;
                goto error;
        }
        /* Create the Work Queue. */
        txq_data->wqe_n = log2above(wqe_n);
        txq_data->wqe_s = 1 << txq_data->wqe_n;
        txq_data->wqe_m = txq_data->wqe_s - 1;
        txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf;
        txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
        txq_data->wqe_ci = 0;
        txq_data->wqe_pi = 0;
        txq_data->wqe_comp = 0;
        txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
        txq_data->qp_db = (volatile uint32_t *)
                          (txq_obj->sq_dbrec_page->dbrs +
                           txq_obj->sq_dbrec_offset +
                           MLX5_SND_DBR * sizeof(uint32_t));
        *txq_data->qp_db = 0;
        txq_data->qp_num_8s = txq_obj->sq_devx->id << 8;
        /* Change Send Queue state to Ready-to-Send. */
        ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
        if (ret) {
                rte_errno = errno;
                DRV_LOG(ERR,
                        "Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
                        dev->data->port_id, idx);
                goto error;
        }
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        /*
         * When DevX is used, the TIS transport domain value must be queried
         * and stored; this is done once per port and the value is used on
         * Rx when creating a matching TIR.
         */
        if (!priv->sh->tdn)
                priv->sh->tdn = priv->sh->td->id;
#endif
        MLX5_ASSERT(sh->tx_uar);
        reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
        MLX5_ASSERT(reg_addr);
        txq_ctrl->bf_reg = reg_addr;
        txq_ctrl->uar_mmap_offset =
                        mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
        txq_uar_init(txq_ctrl);
        dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        mlx5_txq_release_devx_resources(txq_obj);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
#endif
}
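/*
 * CQ sizing recap: one completion is requested roughly per
 * MLX5_TX_COMP_THRESH descriptors, plus one spare entry and headroom of
 * MLX5_TX_COMP_THRESH_INLINE_DIV for inline-heavy traffic; the sum is
 * rounded up to a power of two and rejected if it does not fit in 16 bits.
 */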
/**
 * Release a Tx DevX queue object.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
static void
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
        MLX5_ASSERT(txq_obj);
        if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
                if (txq_obj->tis)
                        claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
        } else {
                mlx5_txq_release_devx_resources(txq_obj);
#endif
        }
}
struct mlx5_obj_ops devx_obj_ops = {
        .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
        .rxq_obj_new = mlx5_rxq_devx_obj_new,
        .rxq_event_get = mlx5_rx_devx_get_event,
        .rxq_obj_modify = mlx5_devx_modify_rq,
        .rxq_obj_release = mlx5_rxq_devx_obj_release,
        .ind_table_new = mlx5_devx_ind_table_new,
        .ind_table_modify = mlx5_devx_ind_table_modify,
        .ind_table_destroy = mlx5_devx_ind_table_destroy,
        .hrxq_new = mlx5_devx_hrxq_new,
        .hrxq_destroy = mlx5_devx_tir_destroy,
        .hrxq_modify = mlx5_devx_hrxq_modify,
        .drop_action_create = mlx5_devx_drop_action_create,
        .drop_action_destroy = mlx5_devx_drop_action_destroy,
        .txq_obj_new = mlx5_txq_devx_obj_new,
        .txq_obj_modify = mlx5_devx_modify_sq,
        .txq_obj_release = mlx5_txq_devx_obj_release,
};
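/*
 * Selection sketch (assumed wiring, see the OS-specific probe code in
 * mlx5_os.c): the PMD picks this table when DevX queue objects are enabled,
 * roughly:
 *
 *      if (config->devx && config->dv_flow_en)
 *              priv->obj_ops = devx_obj_ops;
 *      else
 *              priv->obj_ops = ibv_obj_ops;
 *
 * Every Rx/Tx queue object operation then dispatches through priv->obj_ops,
 * keeping the Verbs and DevX paths interchangeable.
 */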