/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"

/**
 * Modify RQ vlan stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	rq_attr.rq_state = MLX5_RQC_STATE_RDY;
	rq_attr.state = MLX5_RQC_STATE_RDY;
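	/* VSD is "VLAN Stripping Disable": 0 enables stripping, 1 disables. */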
	rq_attr.vsd = (on ? 0 : 1);
	rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param type
 *   Type of change queue state.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
	struct mlx5_devx_modify_rq_attr rq_attr;

	memset(&rq_attr, 0, sizeof(rq_attr));
	switch (type) {
	case MLX5_RXQ_MOD_ERR2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_ERR;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	case MLX5_RXQ_MOD_RST2RDY:
		rq_attr.rq_state = MLX5_RQC_STATE_RST;
		rq_attr.state = MLX5_RQC_STATE_RDY;
		break;
	case MLX5_RXQ_MOD_RDY2ERR:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_ERR;
		break;
	case MLX5_RXQ_MOD_RDY2RST:
		rq_attr.rq_state = MLX5_RQC_STATE_RDY;
		rq_attr.state = MLX5_RQC_STATE_RST;
		break;
	default:
		break;
	}
	return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}

/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of change queue state.
 * @param dev_port
 *   Unused; kept to match the Verbs callback prototype.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
		    uint8_t dev_port)
{
	struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
	int ret;
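
	/*
	 * All transitions pass through the RST state: the SQ first moves
	 * from its current state (ERR or RDY) to RST here, then from RST
	 * to RDY in the block below when requested.
	 */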
	if (type != MLX5_TXQ_MOD_RST2RDY) {
		/* Change queue state to reset. */
		if (type == MLX5_TXQ_MOD_ERR2RDY)
			msq_attr.sq_state = MLX5_SQC_STATE_ERR;
		else
			msq_attr.sq_state = MLX5_SQC_STATE_RDY;
		msq_attr.state = MLX5_SQC_STATE_RST;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	if (type != MLX5_TXQ_MOD_RDY2RST) {
		/* Change queue state to ready. */
		msq_attr.sq_state = MLX5_SQC_STATE_RST;
		msq_attr.state = MLX5_SQC_STATE_RDY;
		ret = mlx5_devx_cmd_modify_sq(obj->sq_devx, &msq_attr);
		if (ret) {
			DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
				" %s", strerror(errno));
			rte_errno = errno;
			return ret;
		}
	}
	/*
	 * The dev_port argument is used only by the Verbs implementation;
	 * this function and its Verbs counterpart are reachable through the
	 * same txq_obj_modify callback pointer, so both must keep the same
	 * prototype.
	 */
	(void)dev_port;
	return 0;
}

/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

	if (rxq_ctrl->wq_umem) {
		mlx5_os_umem_dereg(rxq_ctrl->wq_umem);
		rxq_ctrl->wq_umem = NULL;
	}
	if (rxq_ctrl->rxq.wqes) {
		mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
		rxq_ctrl->rxq.wqes = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->rq_dbr_offset));
		rxq_ctrl->rq_dbrec_page = NULL;
	}
}

/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

	if (rxq_ctrl->cq_umem) {
		mlx5_os_umem_dereg(rxq_ctrl->cq_umem);
		rxq_ctrl->cq_umem = NULL;
	}
	if (rxq_ctrl->rxq.cqes) {
		rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
		rxq_ctrl->rxq.cqes = NULL;
	}
	if (dbr_page) {
		claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id(dbr_page->umem),
					    rxq_ctrl->cq_dbr_offset));
		rxq_ctrl->cq_dbrec_page = NULL;
	}
}

/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
	MLX5_ASSERT(rxq_obj);
	MLX5_ASSERT(rxq_obj->rq);
	if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
		mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
	} else {
		MLX5_ASSERT(rxq_obj->devx_cq);
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
		claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
		if (rxq_obj->devx_channel)
			mlx5_os_devx_destroy_event_channel
							(rxq_obj->devx_channel);
		mlx5_rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
		mlx5_rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
	}
}

/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
	union {
		struct mlx5dv_devx_async_event_hdr event_resp;
		uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
	} out;
	int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
					    &out.event_resp,
					    sizeof(out.buf));

	if (ret < 0) {
		rte_errno = errno;
		return -rte_errno;
	}
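	/* The cookie is set to the CQ pointer at subscription time in
	 * mlx5_rxq_create_devx_cq_resources(), so a mismatch means the event
	 * belongs to another object.
	 */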
	if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
		rte_errno = EINVAL;
		return -rte_errno;
	}
	return 0;
#else
	(void)rxq_obj;
	rte_errno = ENOTSUP;
	return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}

/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
			      struct mlx5_devx_create_rq_attr *rq_attr)
{
	rq_attr->state = MLX5_RQC_STATE_RST;
	rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
	rq_attr->cqn = cqn;
	rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}

/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
		       struct mlx5_devx_wq_attr *wq_attr)
{
	wq_attr->end_padding_mode = priv->config.hw_padding ?
					MLX5_WQ_END_PAD_MODE_ALIGN :
					MLX5_WQ_END_PAD_MODE_NONE;
	wq_attr->pd = priv->sh->pdn;
	wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
	wq_attr->dbr_umem_id =
			mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
	wq_attr->dbr_umem_valid = 1;
	wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
	wq_attr->wq_umem_valid = 1;
}

/**
 * Create a RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr rq_attr = { 0 };
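	/* elts_n and sges_n are log2 values: one WQE gathers 2^sges_n
	 * segments, hence the WQ depth is elts / segments-per-WQE.
	 */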
	uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
	uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	void *buf = NULL;
	struct mlx5_devx_obj *rq;

	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
	rq_attr.flush_in_error_en = 1;
	mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
	/* Fill WQ attributes for this RQ. */
	if (mlx5_rxq_mprq_enabled(rxq_data)) {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
		/*
		 * Number of strides in each WQE:
		 * 512*2^single_wqe_log_num_of_strides.
		 */
		rq_attr.wq_attr.single_wqe_log_num_of_strides =
				rxq_data->strd_num_n -
				MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
		/* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
		rq_attr.wq_attr.single_stride_log_num_of_bytes =
				rxq_data->strd_sz_n -
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
		wqe_size = sizeof(struct mlx5_wqe_mprq);
	} else {
		rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
		wqe_size = sizeof(struct mlx5_wqe_data_seg);
	}
	log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
	/* Calculate and allocate WQ memory space. */
	wqe_size = 1 << log_wqe_size; /* Round up to the next power of two. */
	wq_size = wqe_n * wqe_size;
	size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size");
		rte_errno = ENOMEM;
		return NULL;
	}
	buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
			  alignment, rxq_ctrl->socket);
	if (!buf)
		return NULL;
	rxq_data->wqes = buf;
	rxq_ctrl->wq_umem = mlx5_os_umem_reg(priv->sh->ctx,
					     buf, wq_size, 0);
	if (!rxq_ctrl->wq_umem)
		goto error;
	/* Allocate RQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
		goto error;
	}
	rxq_ctrl->rq_dbr_offset = dbr_offset;
	rxq_ctrl->rq_dbrec_page = dbr_page;
	rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->rq_dbr_offset);
	/* Create RQ using DevX API. */
	mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
	rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
	if (!rq)
		goto error;
	return rq;
error:
	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_devx_obj *cq_obj = 0;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	size_t page_size = rte_mem_page_size();
	unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
	struct mlx5_devx_dbr_page *dbr_page;
	int64_t dbr_offset;
	void *buf = NULL;
	uint16_t event_nums[1] = {0};
	uint32_t log_cqe_n;
	uint32_t cq_size;
	int ret = 0;

	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get page_size.");
		goto error;
	}
	if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
	    !rxq_data->lro) {
		cq_attr.cqe_comp_en = 1u;
		rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
		rxq_data->byte_mask = UINT32_MAX;
		switch (priv->config.cqe_comp_fmt) {
		case MLX5_CQE_RESP_FORMAT_HASH:
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM:
			/*
			 * Select CSUM miniCQE format only for non-vectorized
			 * MPRQ Rx burst, use HASH miniCQE format for others.
			 */
			if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
			    mlx5_rxq_mprq_enabled(rxq_data))
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
			else
				cq_attr.mini_cqe_res_format =
					MLX5_CQE_RESP_FORMAT_HASH;
			rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
			break;
		case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
			rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
			/* fallthrough */
		case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
			cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
			break;
		case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
			cq_attr.mini_cqe_res_format = 0;
			cq_attr.mini_cqe_res_format_ext = 1;
			break;
		}
471 "Port %u Rx CQE compression is enabled, format %d.",
472 dev->data->port_id, priv->config.cqe_comp_fmt);
474 * For vectorized Rx, it must not be doubled in order to
475 * make cq_ci and rq_ci aligned.
477 if (mlx5_rxq_check_vec_support(rxq_data) < 0)
479 } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
481 "Port %u Rx CQE compression is disabled for HW"
484 } else if (priv->config.cqe_comp && rxq_data->lro) {
486 "Port %u Rx CQE compression is disabled for LRO.",
	log_cqe_n = log2above(cqe_n);
	cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
	buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
				rxq_ctrl->socket);
	if (!buf) {
		DRV_LOG(ERR, "Failed to allocate memory for CQ.");
		goto error;
	}
	rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
	rxq_ctrl->cq_umem = mlx5_os_umem_reg(priv->sh->ctx, buf,
					     cq_size,
					     IBV_ACCESS_LOCAL_WRITE);
	if (!rxq_ctrl->cq_umem) {
		DRV_LOG(ERR, "Failed to register umem for CQ.");
		goto error;
	}
	/* Allocate CQ door-bell. */
	dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
	if (dbr_offset < 0) {
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	rxq_ctrl->cq_dbr_offset = dbr_offset;
	rxq_ctrl->cq_dbrec_page = dbr_page;
	rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
			  (uintptr_t)rxq_ctrl->cq_dbr_offset);
	rxq_data->cq_uar =
			mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
	/* Create CQ using DevX API. */
	cq_attr.eqn = priv->sh->eqn;
	cq_attr.uar_page_id =
			mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
	cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
	cq_attr.q_umem_valid = 1;
	cq_attr.log_cq_size = log_cqe_n;
	cq_attr.log_page_size = rte_log2_u32(page_size);
	cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
	cq_attr.db_umem_valid = 1;
	cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!cq_obj)
		goto error;
	rxq_data->cqe_n = log_cqe_n;
	rxq_data->cqn = cq_obj->id;
	if (rxq_ctrl->obj->devx_channel) {
		ret = mlx5_os_devx_subscribe_devx_event
						(rxq_ctrl->obj->devx_channel,
						 cq_obj->obj,
						 sizeof(event_nums),
						 event_nums,
						 (uint64_t)(uintptr_t)cq_obj);
		if (ret) {
			DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
			rte_errno = errno;
			goto error;
		}
	}
	/* Initialise CQ to 1's to mark HW ownership for all CQEs. */
	memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
	return cq_obj;
error:
	if (cq_obj)
		mlx5_devx_cmd_destroy(cq_obj);
	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
	return NULL;
}

/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_devx_create_rq_attr attr = { 0 };
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	tmpl->rxq_ctrl = rxq_ctrl;
	attr.hairpin = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
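	/* Both values are log2, so the subtraction divides the data size by
	 * the per-packet stride.
	 */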
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
					   rxq_ctrl->socket);
	if (!tmpl->rq) {
		DRV_LOG(ERR,
			"Port %u Rx hairpin queue %u can't create rq object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
	int ret = 0;

	MLX5_ASSERT(rxq_data);
	MLX5_ASSERT(tmpl);
	if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
		return mlx5_rxq_obj_hairpin_new(dev, idx);
	tmpl->rxq_ctrl = rxq_ctrl;
	if (rxq_ctrl->irq) {
		int devx_ev_flag =
			MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

		tmpl->devx_channel = mlx5_os_devx_create_event_channel
								(priv->sh->ctx,
								 devx_ev_flag);
		if (!tmpl->devx_channel) {
			rte_errno = errno;
			DRV_LOG(ERR, "Failed to create event channel %d.",
				rte_errno);
			goto error;
		}
		tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
	}
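	/* The CQ is created first: its number (cqn) is required to fill the
	 * RQ attributes.
	 */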
	/* Create CQ using DevX API. */
	tmpl->devx_cq = mlx5_rxq_create_devx_cq_resources(dev, idx);
	if (!tmpl->devx_cq) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	/* Create RQ using DevX API. */
	tmpl->rq = mlx5_rxq_create_devx_rq_resources(dev, idx);
	if (!tmpl->rq) {
		DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
			dev->data->port_id, idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Change queue state to ready. */
	ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
	if (ret)
		goto error;
	rxq_data->cq_arm_sn = 0;
	mlx5_rxq_initialize(rxq_data);
	rxq_data->cq_ci = 0;
	dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	rxq_ctrl->wqn = tmpl->rq->id;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	if (tmpl->rq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
	if (tmpl->devx_cq)
		claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
	if (tmpl->devx_channel)
		mlx5_os_devx_destroy_event_channel(tmpl->devx_channel);
	mlx5_rxq_release_devx_rq_resources(rxq_ctrl);
	mlx5_rxq_release_devx_cq_resources(rxq_ctrl);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Prepare RQT attribute structure for DevX RQT API.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   Array of Rx queue indices to fill the table with.
 * @param queues_n
 *   Number of entries in @p queues.
 *
 * @return
 *   The RQT attr object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_rqt_attr *
mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
				    const unsigned int log_n,
				    const uint16_t *queues,
				    const uint32_t queues_n)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;
	const unsigned int rqt_n = 1 << log_n;
	unsigned int i = 0, j;

	rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
			       rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
	if (!rqt_attr) {
		DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
			dev->data->port_id);
		rte_errno = ENOMEM;
		return NULL;
	}
	rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
	rqt_attr->rqt_actual_size = rqt_n;
	for (i = 0; i != queues_n; ++i) {
		struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of(rxq, struct mlx5_rxq_ctrl, rxq);

		rqt_attr->rq_list[i] = rxq_ctrl->obj->rq->id;
	}
	MLX5_ASSERT(i > 0);
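	/* Pad the remaining RQT entries by wrapping around the configured
	 * queue list.
	 */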
	for (j = 0; i != rqt_n; ++j, ++i)
		rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
	return rqt_attr;
}

/**
 * Create RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
			struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
						       ind_tbl->queues,
						       ind_tbl->queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
	mlx5_free(rqt_attr);
	if (!ind_tbl->rqt) {
		DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	return 0;
}

/**
 * Modify RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   Array of Rx queue indices to fill the table with.
 * @param queues_n
 *   Number of entries in @p queues.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
			   const uint16_t *queues, const uint32_t queues_n,
			   struct mlx5_ind_table_obj *ind_tbl)
{
	int ret = 0;
	struct mlx5_devx_rqt_attr *rqt_attr = NULL;

	MLX5_ASSERT(ind_tbl);
	rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
						       queues,
						       queues_n);
	if (!rqt_attr)
		return -rte_errno;
	ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
	mlx5_free(rqt_attr);
	if (ret) {
		DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
			dev->data->port_id);
		rte_errno = errno;
	}
	return ret;
}

/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
	claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}

/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
		       uint64_t hash_fields,
		       const struct mlx5_ind_table_obj *ind_tbl,
		       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
	struct mlx5_rxq_ctrl *rxq_ctrl =
		container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
	enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
	bool lro = true;
	uint32_t i;

	/* Enable TIR LRO only if all the queues were configured for. */
	for (i = 0; i < ind_tbl->queues_n; ++i) {
		if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
			lro = false;
			break;
		}
	}
	memset(tir_attr, 0, sizeof(*tir_attr));
	tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
	tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
	tir_attr->tunneled_offload_en = !!tunnel;
	/* If needed, translate hash_fields bitmap to PRM format. */
	if (hash_fields) {
		struct mlx5_rx_hash_field_select *rx_hash_field_select =
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
			hash_fields & IBV_RX_HASH_INNER ?
			&tir_attr->rx_hash_field_selector_inner :
#endif
			&tir_attr->rx_hash_field_selector_outer;
		/* 1 bit: 0: IPv4, 1: IPv6. */
		rx_hash_field_select->l3_prot_type =
			!!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
		/* 1 bit: 0: TCP, 1: UDP. */
		rx_hash_field_select->l4_prot_type =
			!!(hash_fields & MLX5_UDP_IBV_RX_HASH);
		/* Bitmask which sets which fields to use in RX Hash. */
		rx_hash_field_select->selected_fields =
			((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
			(!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
			(!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
			(!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
			 MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
	}
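	/* Hairpin queues take the transport domain id directly from the TD
	 * object; regular queues use the TDN queried from the TIS when the
	 * Tx queue object is created.
	 */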
	if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
		tir_attr->transport_domain = priv->sh->td->id;
	else
		tir_attr->transport_domain = priv->sh->tdn;
	memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	tir_attr->indirect_table = ind_tbl->rqt->id;
	if (dev->data->dev_conf.lpbk_mode)
		tir_attr->self_lb_block =
					MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
	if (lro) {
		tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
		tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
		tir_attr->lro_enable_mask =
				MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
				MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
	}
}

/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		   int tunnel __rte_unused)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_devx_tir_attr tir_attr = {0};
	int err;

	mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
			       hrxq->ind_table, tunnel, &tir_attr);
	hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
	if (!hrxq->tir) {
		DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		goto error;
	}
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
							  &hrxq->action)) {
		rte_errno = errno;
		goto error;
	}
#endif
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	if (hrxq->tir)
		claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}

/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its tir.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
	claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}

/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
		      const uint8_t *rss_key,
		      uint64_t hash_fields,
		      const struct mlx5_ind_table_obj *ind_tbl)
{
	struct mlx5_devx_modify_tir_attr modify_tir = {0};

	/*
	 * Untested modification fields:
	 * - rx_hash_symmetric is not set in hrxq_new(),
	 * - rx_hash_fn is hard-coded in hrxq_new(),
	 * - lro_xxx is not set after rxq setup.
	 */
	if (ind_tbl != hrxq->ind_table)
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
	if (hash_fields != hrxq->hash_fields ||
	    memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
		modify_tir.modify_bitmask |=
			MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
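	/* The attributes are always rebuilt in full; the bitmask above
	 * selects which of them the modify command actually applies.
	 */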
	mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
			       0, /* N/A - tunnel modification unsupported. */
			       &modify_tir.tir);
	modify_tir.tirn = hrxq->tir->id;
	if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
		DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
			dev->data->port_id);
		rte_errno = errno;
		return -rte_errno;
	}
	/* Update hrxq internal data. */
	hrxq->hash_fields = hash_fields;
	hrxq->ind_table = ind_tbl;
	memcpy(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
	return 0;
}

/**
 * Create a DevX drop action for an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
	return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
	(void)dev;
	DRV_LOG(ERR, "DevX drop action is not supported yet.");
	rte_errno = ENOTSUP;
}

/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_devx_create_sq_attr attr = { 0 };
	struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
	uint32_t max_wq_data;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(tmpl);
	tmpl->txq_ctrl = txq_ctrl;
	attr.hairpin = 1;
	attr.tis_lst_sz = 1;
	max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
	/* Jumbo frames > 9KB should be supported, and more packets. */
	if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (priv->config.log_hp_size > max_wq_data) {
			DRV_LOG(ERR, "Total data size %u power of 2 is "
				"too large for hairpin.",
				priv->config.log_hp_size);
			rte_errno = ERANGE;
			return -rte_errno;
		}
		attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
	} else {
		attr.wq_attr.log_hairpin_data_sz =
				(max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
				 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	/* Set the packets number to the maximum value for performance. */
	attr.wq_attr.log_hairpin_num_packets =
			attr.wq_attr.log_hairpin_data_sz -
			MLX5_HAIRPIN_QUEUE_STRIDE;
	attr.tis_num = priv->sh->tis->id;
	tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
	if (!tmpl->sq) {
		DRV_LOG(ERR,
			"Port %u tx hairpin queue %u can't create SQ object.",
			dev->data->port_id, idx);
		rte_errno = errno;
		return -rte_errno;
	}
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
	return 0;
}

#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
/**
 * Release DevX SQ resources.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
static void
mlx5_txq_release_devx_sq_resources(struct mlx5_txq_obj *txq_obj)
{
	if (txq_obj->sq_devx) {
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->sq_devx));
		txq_obj->sq_devx = NULL;
	}
	if (txq_obj->sq_umem) {
		claim_zero(mlx5_os_umem_dereg(txq_obj->sq_umem));
		txq_obj->sq_umem = NULL;
	}
	if (txq_obj->sq_buf) {
		mlx5_free(txq_obj->sq_buf);
		txq_obj->sq_buf = NULL;
	}
	if (txq_obj->sq_dbrec_page) {
		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id
						(txq_obj->sq_dbrec_page->umem),
					    txq_obj->sq_dbrec_offset));
		txq_obj->sq_dbrec_page = NULL;
	}
}

/**
 * Release DevX Tx CQ resources.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
static void
mlx5_txq_release_devx_cq_resources(struct mlx5_txq_obj *txq_obj)
{
	if (txq_obj->cq_devx)
		claim_zero(mlx5_devx_cmd_destroy(txq_obj->cq_devx));
	if (txq_obj->cq_umem)
		claim_zero(mlx5_os_umem_dereg(txq_obj->cq_umem));
	if (txq_obj->cq_buf)
		mlx5_free(txq_obj->cq_buf);
	if (txq_obj->cq_dbrec_page)
		claim_zero(mlx5_release_dbr(&txq_obj->txq_ctrl->priv->dbrpgs,
					    mlx5_os_get_umem_id
						(txq_obj->cq_dbrec_page->umem),
					    txq_obj->cq_dbrec_offset));
}

/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
	mlx5_txq_release_devx_sq_resources(txq_obj);
	mlx5_txq_release_devx_cq_resources(txq_obj);
}

/**
 * Create a DevX CQ object and its resources for a Tx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   Number of CQEs in CQ, 0 otherwise and rte_errno is set.
 */
static uint32_t
mlx5_txq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_cq_attr cq_attr = { 0 };
	struct mlx5_cqe *cqe;
	size_t page_size;
	size_t alignment;
	uint32_t cqe_n;
	uint32_t i;
	int ret;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size.");
		rte_errno = ENOMEM;
		return 0;
	}
	/* Allocate memory buffer for CQEs. */
	alignment = MLX5_CQE_BUF_ALIGNMENT;
	if (alignment == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get CQE buf alignment.");
		rte_errno = ENOMEM;
		return 0;
	}
	/* Create the Completion Queue. */
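	/* Roughly one CQE is expected per MLX5_TX_COMP_THRESH descriptors,
	 * plus slack for inline completion requests; rounded up to a power
	 * of two below.
	 */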
	cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
		1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
	cqe_n = 1UL << log2above(cqe_n);
	if (cqe_n > UINT16_MAX) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u requests too many CQEs %u.",
			dev->data->port_id, txq_data->idx, cqe_n);
		rte_errno = EINVAL;
		return 0;
	}
	txq_obj->cq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				      cqe_n * sizeof(struct mlx5_cqe),
				      alignment,
				      priv->sh->numa_node);
	if (!txq_obj->cq_buf) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot allocate memory (CQ).",
			dev->data->port_id, txq_data->idx);
		rte_errno = ENOMEM;
		return 0;
	}
	/* Register allocated buffer in user space with DevX. */
	txq_obj->cq_umem = mlx5_os_umem_reg(priv->sh->ctx,
					    (void *)txq_obj->cq_buf,
					    cqe_n * sizeof(struct mlx5_cqe),
					    IBV_ACCESS_LOCAL_WRITE);
	if (!txq_obj->cq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot register memory (CQ).",
			dev->data->port_id, txq_data->idx);
		goto error;
	}
	/* Allocate doorbell record for completion queue. */
	txq_obj->cq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
						&priv->dbrpgs,
						&txq_obj->cq_dbrec_page);
	if (txq_obj->cq_dbrec_offset < 0) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
		goto error;
	}
	cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
	cq_attr.eqn = priv->sh->eqn;
	cq_attr.q_umem_valid = 1;
	cq_attr.q_umem_offset = (uintptr_t)txq_obj->cq_buf % page_size;
	cq_attr.q_umem_id = mlx5_os_get_umem_id(txq_obj->cq_umem);
	cq_attr.db_umem_valid = 1;
	cq_attr.db_umem_offset = txq_obj->cq_dbrec_offset;
	cq_attr.db_umem_id = mlx5_os_get_umem_id(txq_obj->cq_dbrec_page->umem);
	cq_attr.log_cq_size = rte_log2_u32(cqe_n);
	cq_attr.log_page_size = rte_log2_u32(page_size);
	/* Create completion queue object with DevX. */
	txq_obj->cq_devx = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
	if (!txq_obj->cq_devx) {
		rte_errno = errno;
		DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
			dev->data->port_id, idx);
		goto error;
	}
	/* Initial fill CQ buffer with invalid CQE opcode. */
	cqe = (struct mlx5_cqe *)txq_obj->cq_buf;
	for (i = 0; i < cqe_n; i++) {
		cqe->op_own = (MLX5_CQE_INVALID << 4) | MLX5_CQE_OWNER_MASK;
		++cqe;
	}
	return cqe_n;
error:
	ret = rte_errno;
	mlx5_txq_release_devx_cq_resources(txq_obj);
	rte_errno = ret;
	return 0;
}

/**
 * Create a SQ object and its resources using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   Number of WQEs in SQ, 0 otherwise and rte_errno is set.
 */
static uint32_t
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	struct mlx5_devx_create_sq_attr sq_attr = { 0 };
	size_t page_size;
	uint32_t wqe_n;
	int ret;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	page_size = rte_mem_page_size();
	if (page_size == (size_t)-1) {
		DRV_LOG(ERR, "Failed to get mem page size.");
		rte_errno = ENOMEM;
		return 0;
	}
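	/* Clamp the SQ depth to the max_qp_wr device capability. */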
	wqe_n = RTE_MIN(1UL << txq_data->elts_n,
			(uint32_t)priv->sh->device_attr.max_qp_wr);
	txq_obj->sq_buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
				      wqe_n * sizeof(struct mlx5_wqe),
				      page_size, priv->sh->numa_node);
	if (!txq_obj->sq_buf) {
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot allocate memory (SQ).",
			dev->data->port_id, txq_data->idx);
		rte_errno = ENOMEM;
		goto error;
	}
	/* Register allocated buffer in user space with DevX. */
	txq_obj->sq_umem = mlx5_os_umem_reg
					(priv->sh->ctx,
					 (void *)txq_obj->sq_buf,
					 wqe_n * sizeof(struct mlx5_wqe),
					 IBV_ACCESS_LOCAL_WRITE);
	if (!txq_obj->sq_umem) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u cannot register memory (SQ).",
			dev->data->port_id, txq_data->idx);
		goto error;
	}
	/* Allocate doorbell record for send queue. */
	txq_obj->sq_dbrec_offset = mlx5_get_dbr(priv->sh->ctx,
						&priv->dbrpgs,
						&txq_obj->sq_dbrec_page);
	if (txq_obj->sq_dbrec_offset < 0) {
		rte_errno = errno;
		DRV_LOG(ERR, "Failed to allocate SQ door-bell.");
		goto error;
	}
	sq_attr.tis_lst_sz = 1;
	sq_attr.tis_num = priv->sh->tis->id;
	sq_attr.state = MLX5_SQC_STATE_RST;
	sq_attr.cqn = txq_obj->cq_devx->id;
	sq_attr.flush_in_error_en = 1;
	sq_attr.allow_multi_pkt_send_wqe = !!priv->config.mps;
	sq_attr.allow_swp = !!priv->config.swp;
	sq_attr.min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode;
	sq_attr.wq_attr.uar_page =
				mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar);
	sq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
	sq_attr.wq_attr.pd = priv->sh->pdn;
	sq_attr.wq_attr.log_wq_stride = rte_log2_u32(MLX5_WQE_SIZE);
	sq_attr.wq_attr.log_wq_sz = log2above(wqe_n);
	sq_attr.wq_attr.dbr_umem_valid = 1;
	sq_attr.wq_attr.dbr_addr = txq_obj->sq_dbrec_offset;
	sq_attr.wq_attr.dbr_umem_id =
			mlx5_os_get_umem_id(txq_obj->sq_dbrec_page->umem);
	sq_attr.wq_attr.wq_umem_valid = 1;
	sq_attr.wq_attr.wq_umem_id = mlx5_os_get_umem_id(txq_obj->sq_umem);
	sq_attr.wq_attr.wq_umem_offset = (uintptr_t)txq_obj->sq_buf % page_size;
	/* Create Send Queue object with DevX. */
	txq_obj->sq_devx = mlx5_devx_cmd_create_sq(priv->sh->ctx, &sq_attr);
	if (!txq_obj->sq_devx) {
		rte_errno = errno;
		DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
			dev->data->port_id, idx);
		goto error;
	}
	return wqe_n;
error:
	ret = rte_errno;
	mlx5_txq_release_devx_sq_resources(txq_obj);
	rte_errno = ret;
	return 0;
}
#endif

/**
 * Create the Tx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
	struct mlx5_txq_ctrl *txq_ctrl =
		container_of(txq_data, struct mlx5_txq_ctrl, txq);

	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
		return mlx5_txq_obj_hairpin_new(dev, idx);
#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
	DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
		     dev->data->port_id, idx);
	rte_errno = ENOMEM;
	return -rte_errno;
#else
	struct mlx5_dev_ctx_shared *sh = priv->sh;
	struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
	void *reg_addr;
	uint32_t cqe_n;
	uint32_t wqe_n;
	int ret = 0;

	MLX5_ASSERT(txq_data);
	MLX5_ASSERT(txq_obj);
	txq_obj->txq_ctrl = txq_ctrl;
	cqe_n = mlx5_txq_create_devx_cq_resources(dev, idx);
	if (!cqe_n) {
		rte_errno = errno;
		goto error;
	}
	txq_data->cqe_n = log2above(cqe_n);
	txq_data->cqe_s = 1 << txq_data->cqe_n;
	txq_data->cqe_m = txq_data->cqe_s - 1;
	txq_data->cqes = (volatile struct mlx5_cqe *)txq_obj->cq_buf;
	txq_data->cq_ci = 0;
	txq_data->cq_pi = 0;
	txq_data->cq_db = (volatile uint32_t *)(txq_obj->cq_dbrec_page->dbrs +
						txq_obj->cq_dbrec_offset);
	*txq_data->cq_db = 0;
	/* Create Send Queue object with DevX. */
	wqe_n = mlx5_txq_create_devx_sq_resources(dev, idx);
	if (!wqe_n) {
		rte_errno = errno;
		goto error;
	}
	/* Create the Work Queue. */
	txq_data->wqe_n = log2above(wqe_n);
	txq_data->wqe_s = 1 << txq_data->wqe_n;
	txq_data->wqe_m = txq_data->wqe_s - 1;
	txq_data->wqes = (struct mlx5_wqe *)txq_obj->sq_buf;
	txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
	txq_data->wqe_ci = 0;
	txq_data->wqe_pi = 0;
	txq_data->wqe_comp = 0;
	txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
	txq_data->qp_db = (volatile uint32_t *)
			  (txq_obj->sq_dbrec_page->dbrs +
			   txq_obj->sq_dbrec_offset +
			   MLX5_SND_DBR * sizeof(uint32_t));
	*txq_data->qp_db = 0;
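	/* Keep the SQ number pre-shifted by 8 bits so it can be OR-ed with
	 * the DS count when building WQE control segments.
	 */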
	txq_data->qp_num_8s = txq_obj->sq_devx->id << 8;
	/* Change Send Queue state to Ready-to-Send. */
	ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
	if (ret) {
		rte_errno = errno;
		DRV_LOG(ERR,
			"Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
			dev->data->port_id, idx);
		goto error;
	}
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	/*
	 * If using DevX, the TIS transport domain value must be queried and
	 * stored once per port.
	 * It is then used on Rx, when creating the matching TIR.
	 */
	if (!priv->sh->tdn)
		priv->sh->tdn = priv->sh->td->id;
#endif
	MLX5_ASSERT(sh->tx_uar);
	reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
	MLX5_ASSERT(reg_addr);
	txq_ctrl->bf_reg = reg_addr;
	txq_ctrl->uar_mmap_offset =
			mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
	txq_uar_init(txq_ctrl);
	dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;
error:
	ret = rte_errno; /* Save rte_errno before cleanup. */
	mlx5_txq_release_devx_resources(txq_obj);
	rte_errno = ret; /* Restore rte_errno. */
	return -rte_errno;
#endif
}

/**
 * Release a Tx DevX queue object.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
static void
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
	MLX5_ASSERT(txq_obj);
	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
		if (txq_obj->tis)
			claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
	} else {
		mlx5_txq_release_devx_resources(txq_obj);
#endif
	}
}

struct mlx5_obj_ops devx_obj_ops = {
	.rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
	.rxq_obj_new = mlx5_rxq_devx_obj_new,
	.rxq_event_get = mlx5_rx_devx_get_event,
	.rxq_obj_modify = mlx5_devx_modify_rq,
	.rxq_obj_release = mlx5_rxq_devx_obj_release,
	.ind_table_new = mlx5_devx_ind_table_new,
	.ind_table_modify = mlx5_devx_ind_table_modify,
	.ind_table_destroy = mlx5_devx_ind_table_destroy,
	.hrxq_new = mlx5_devx_hrxq_new,
	.hrxq_destroy = mlx5_devx_tir_destroy,
	.hrxq_modify = mlx5_devx_hrxq_modify,
	.drop_action_create = mlx5_devx_drop_action_create,
	.drop_action_destroy = mlx5_devx_drop_action_destroy,
	.txq_obj_new = mlx5_txq_devx_obj_new,
	.txq_obj_modify = mlx5_devx_modify_sq,
	.txq_obj_release = mlx5_txq_devx_obj_release,
};