/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_malloc.h>

#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
/**
 * Modify RQ vlan stripping offload.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
        struct mlx5_devx_modify_rq_attr rq_attr;

        memset(&rq_attr, 0, sizeof(rq_attr));
        rq_attr.rq_state = MLX5_RQC_STATE_RDY;
        rq_attr.state = MLX5_RQC_STATE_RDY;
        rq_attr.vsd = (on ? 0 : 1);
        rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
        return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}
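/*
 * Usage sketch (illustrative, not part of this file): Ethernet-layer code is
 * expected to reach this hook through the per-device ops table rather than by
 * calling it directly, roughly as below. The surrounding caller shown here is
 * an assumption for illustration only.
 *
 *      struct mlx5_priv *priv = dev->data->dev_private;
 *      struct mlx5_rxq_ctrl *rxq_ctrl = ...; (queue selected by the caller)
 *
 *      priv->obj_ops->rxq_obj_modify_vlan_strip(rxq_ctrl->obj, on);
 */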
/**
 * Modify RQ state using DevX API (RST <-> RDY).
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, bool is_start)
{
        struct mlx5_devx_modify_rq_attr rq_attr;

        memset(&rq_attr, 0, sizeof(rq_attr));
        if (is_start) {
                rq_attr.rq_state = MLX5_RQC_STATE_RST;
                rq_attr.state = MLX5_RQC_STATE_RDY;
        } else {
                rq_attr.rq_state = MLX5_RQC_STATE_RDY;
                rq_attr.state = MLX5_RQC_STATE_RST;
        }
        return mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
}
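/*
 * Behavior note and usage sketch: rq_state is the state the RQ is currently
 * expected to be in and state is the target, so starting a queue is the
 * RST -> RDY transition and stopping it is RDY -> RST. A caller starting and
 * later stopping a queue would do, for example:
 *
 *      ret = mlx5_devx_modify_rq(rxq_obj, true);   (start: RST -> RDY)
 *      ...
 *      ret = mlx5_devx_modify_rq(rxq_obj, false);  (stop:  RDY -> RST)
 */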
/**
 * Release the resources allocated for an RQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
rxq_release_devx_rq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->rq_dbrec_page;

        if (rxq_ctrl->rxq.wqes) {
                mlx5_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
                rxq_ctrl->rxq.wqes = NULL;
        }
        if (rxq_ctrl->wq_umem) {
                mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
                rxq_ctrl->wq_umem = NULL;
        }
        if (dbr_page) {
                claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
                                            mlx5_os_get_umem_id(dbr_page->umem),
                                            rxq_ctrl->rq_dbr_offset));
                rxq_ctrl->rq_dbrec_page = NULL;
        }
}
/**
 * Release the resources allocated for the Rx CQ DevX object.
 *
 * @param rxq_ctrl
 *   DevX Rx queue object.
 */
static void
rxq_release_devx_cq_resources(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        struct mlx5_devx_dbr_page *dbr_page = rxq_ctrl->cq_dbrec_page;

        if (rxq_ctrl->rxq.cqes) {
                rte_free((void *)(uintptr_t)rxq_ctrl->rxq.cqes);
                rxq_ctrl->rxq.cqes = NULL;
        }
        if (rxq_ctrl->cq_umem) {
                mlx5_glue->devx_umem_dereg(rxq_ctrl->cq_umem);
                rxq_ctrl->cq_umem = NULL;
        }
        if (dbr_page) {
                claim_zero(mlx5_release_dbr(&rxq_ctrl->priv->dbrpgs,
                                            mlx5_os_get_umem_id(dbr_page->umem),
                                            rxq_ctrl->cq_dbr_offset));
                rxq_ctrl->cq_dbrec_page = NULL;
        }
}
/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
        MLX5_ASSERT(rxq_obj);
        MLX5_ASSERT(rxq_obj->rq);
        if (rxq_obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN) {
                mlx5_devx_modify_rq(rxq_obj, false);
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
        } else {
                MLX5_ASSERT(rxq_obj->devx_cq);
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->devx_cq));
                if (rxq_obj->devx_channel)
                        mlx5_glue->devx_destroy_event_channel
                                                        (rxq_obj->devx_channel);
                rxq_release_devx_rq_resources(rxq_obj->rxq_ctrl);
                rxq_release_devx_cq_resources(rxq_obj->rxq_ctrl);
        }
}
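/*
 * Teardown ordering note: a hairpin queue owns only the RQ object, so it is
 * stopped and destroyed directly. A regular DevX queue destroys the RQ before
 * the CQ it consumes from, then the event channel, and finally gives back the
 * host-side resources (WQ/CQ buffers, umems and doorbell records) through the
 * release helpers above.
 */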
/**
 * Get event for an Rx DevX queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
                                            &out.event_resp, sizeof(out.buf));

        if (ret < 0) {
                rte_errno = errno;
                return -rte_errno;
        }
        if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->devx_cq) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
        return 0;
#else
        (void)rxq_obj;
        rte_errno = ENOTSUP;
        return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}
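/*
 * Usage sketch (illustrative): the event channel fd stored in rxq_obj->fd is
 * meant to be polled by the Rx interrupt machinery; once it becomes readable,
 * this helper drains one completion event and checks that its cookie matches
 * the CQ subscribed in rxq_create_devx_cq_resources(), e.g.:
 *
 *      if (priv->obj_ops->rxq_event_get(rxq_obj) == 0)
 *              ; (re-arm the CQ / schedule the Rx burst)
 */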
/**
 * Fill common fields of create RQ attributes structure.
 *
 * @param rxq_data
 *   Pointer to Rx queue data.
 * @param cqn
 *   CQ number to use with this RQ.
 * @param rq_attr
 *   RQ attributes structure to fill.
 */
static void
mlx5_devx_create_rq_attr_fill(struct mlx5_rxq_data *rxq_data, uint32_t cqn,
                              struct mlx5_devx_create_rq_attr *rq_attr)
{
        rq_attr->state = MLX5_RQC_STATE_RST;
        rq_attr->vsd = (rxq_data->vlan_strip) ? 0 : 1;
        rq_attr->cqn = cqn;
        rq_attr->scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
}
/**
 * Fill common fields of DevX WQ attributes structure.
 *
 * @param priv
 *   Pointer to device private data.
 * @param rxq_ctrl
 *   Pointer to Rx queue control structure.
 * @param wq_attr
 *   WQ attributes structure to fill.
 */
static void
mlx5_devx_wq_attr_fill(struct mlx5_priv *priv, struct mlx5_rxq_ctrl *rxq_ctrl,
                       struct mlx5_devx_wq_attr *wq_attr)
{
        wq_attr->end_padding_mode = priv->config.cqe_pad ?
                                        MLX5_WQ_END_PAD_MODE_ALIGN :
                                        MLX5_WQ_END_PAD_MODE_NONE;
        wq_attr->pd = priv->sh->pdn;
        wq_attr->dbr_addr = rxq_ctrl->rq_dbr_offset;
        wq_attr->dbr_umem_id =
                        mlx5_os_get_umem_id(rxq_ctrl->rq_dbrec_page->umem);
        wq_attr->dbr_umem_valid = 1;
        wq_attr->wq_umem_id = mlx5_os_get_umem_id(rxq_ctrl->wq_umem);
        wq_attr->wq_umem_valid = 1;
}
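/*
 * Note: the doorbell record is not a standalone allocation, it lives inside a
 * shared umem page managed by mlx5_get_dbr(), so the WQ attributes carry the
 * (dbr_umem_id, dbr_addr offset) pair rather than a pointer. The same pattern
 * is used for the CQ doorbell in rxq_create_devx_cq_resources() below.
 */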
/**
 * Create an RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX RQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_devx_create_rq_attr rq_attr = { 0 };
        uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
        uint32_t cqn = rxq_ctrl->obj->devx_cq->id;
        struct mlx5_devx_dbr_page *dbr_page;
        int64_t dbr_offset;
        uint32_t wq_size = 0;
        uint32_t wqe_size = 0;
        uint32_t log_wqe_size = 0;
        void *buf = NULL;
        struct mlx5_devx_obj *rq;

        /* Fill RQ attributes. */
        rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
        rq_attr.flush_in_error_en = 1;
        mlx5_devx_create_rq_attr_fill(rxq_data, cqn, &rq_attr);
        /* Fill WQ attributes for this RQ. */
        if (mlx5_rxq_mprq_enabled(rxq_data)) {
                rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
                /*
                 * Number of strides in each WQE:
                 * 512*2^single_wqe_log_num_of_strides.
                 */
                rq_attr.wq_attr.single_wqe_log_num_of_strides =
                                rxq_data->strd_num_n -
                                MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
                /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
                rq_attr.wq_attr.single_stride_log_num_of_bytes =
                                rxq_data->strd_sz_n -
                                MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
                wqe_size = sizeof(struct mlx5_wqe_mprq);
        } else {
                rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
                wqe_size = sizeof(struct mlx5_wqe_data_seg);
        }
        log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
        rq_attr.wq_attr.log_wq_stride = log_wqe_size;
        rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
        /* Calculate and allocate WQ memory space. */
        wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
        wq_size = wqe_n * wqe_size;
        size_t alignment = MLX5_WQE_BUF_ALIGNMENT;
        if (alignment == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get mem page size");
                rte_errno = ENOMEM;
                return NULL;
        }
        buf = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, wq_size,
                          alignment, rxq_ctrl->socket);
        if (!buf)
                return NULL;
        rxq_data->wqes = buf;
        rxq_ctrl->wq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx,
                                                     buf, wq_size, 0);
        if (!rxq_ctrl->wq_umem)
                goto error;
        /* Allocate RQ door-bell. */
        dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
        if (dbr_offset < 0) {
                DRV_LOG(ERR, "Failed to allocate RQ door-bell.");
                goto error;
        }
        rxq_ctrl->rq_dbr_offset = dbr_offset;
        rxq_ctrl->rq_dbrec_page = dbr_page;
        rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
                          (uintptr_t)rxq_ctrl->rq_dbr_offset);
        /* Create RQ using DevX API. */
        mlx5_devx_wq_attr_fill(priv, rxq_ctrl, &rq_attr.wq_attr);
        rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &rq_attr, rxq_ctrl->socket);
        if (!rq)
                goto error;
        return rq;
error:
        rxq_release_devx_rq_resources(rxq_ctrl);
        return NULL;
}
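/*
 * Sizing example (illustrative numbers, not taken from a real device): for a
 * non-MPRQ queue configured with elts_n = 10 and sges_n = 0, wqe_n is
 * 1 << 10 = 1024, wqe_size is sizeof(struct mlx5_wqe_data_seg) = 16B, so
 * log_wqe_size = 4 and the registered WQ buffer is 1024 * 16 = 16KiB. With
 * scattered Rx (sges_n > 0) each WQE grows to 2^sges_n data segments while
 * log_wq_sz shrinks by sges_n, keeping the total element count at 2^elts_n.
 */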
/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   The DevX CQ object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_obj *
rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_devx_obj *cq_obj = 0;
        struct mlx5_devx_cq_attr cq_attr = { 0 };
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        size_t page_size = rte_mem_page_size();
        uint32_t lcore = (uint32_t)rte_lcore_to_cpu_id(-1);
        unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
        struct mlx5_devx_dbr_page *dbr_page;
        int64_t dbr_offset;
        void *buf = NULL;
        uint16_t event_nums[1] = {0};
        uint32_t log_cqe_n;
        uint32_t cq_size;
        uint32_t eqn = 0;
        int ret = 0;

        if (page_size == (size_t)-1) {
                DRV_LOG(ERR, "Failed to get page_size.");
                goto error;
        }
        if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
            !rxq_data->lro) {
                cq_attr.cqe_comp_en = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
#ifdef HAVE_IBV_DEVICE_STRIDING_RQ_SUPPORT
                cq_attr.mini_cqe_res_format =
                                mlx5_rxq_mprq_enabled(rxq_data) ?
                                        MLX5DV_CQE_RES_FORMAT_CSUM_STRIDX :
                                        MLX5DV_CQE_RES_FORMAT_HASH;
#else
                cq_attr.mini_cqe_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
#endif
                /*
                 * For vectorized Rx, it must not be doubled in order to
                 * make cq_ci and rq_ci aligned.
                 */
                if (mlx5_rxq_check_vec_support(rxq_data) < 0)
                        cqe_n *= 2;
        } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
                DRV_LOG(DEBUG,
                        "Port %u Rx CQE compression is disabled for HW"
                        " timestamp.",
                        dev->data->port_id);
        } else if (priv->config.cqe_comp && rxq_data->lro) {
                DRV_LOG(DEBUG,
                        "Port %u Rx CQE compression is disabled for LRO.",
                        dev->data->port_id);
        }
#ifdef HAVE_IBV_MLX5_MOD_CQE_128B_PAD
        if (priv->config.cqe_pad)
                cq_attr.cqe_size = MLX5DV_CQ_INIT_ATTR_FLAGS_CQE_PAD;
#endif
        log_cqe_n = log2above(cqe_n);
        cq_size = sizeof(struct mlx5_cqe) * (1 << log_cqe_n);
        /* Query the EQN for this core. */
        if (mlx5_glue->devx_query_eqn(priv->sh->ctx, lcore, &eqn)) {
                DRV_LOG(ERR, "Failed to query EQN for CQ.");
                goto error;
        }
        cq_attr.eqn = eqn;
        buf = rte_calloc_socket(__func__, 1, cq_size, page_size,
                                rxq_ctrl->socket);
        if (!buf) {
                DRV_LOG(ERR, "Failed to allocate memory for CQ.");
                goto error;
        }
        rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)buf;
        rxq_ctrl->cq_umem = mlx5_glue->devx_umem_reg(priv->sh->ctx, buf,
                                                     cq_size,
                                                     IBV_ACCESS_LOCAL_WRITE);
        if (!rxq_ctrl->cq_umem) {
                DRV_LOG(ERR, "Failed to register umem for CQ.");
                goto error;
        }
        /* Allocate CQ door-bell. */
        dbr_offset = mlx5_get_dbr(priv->sh->ctx, &priv->dbrpgs, &dbr_page);
        if (dbr_offset < 0) {
                DRV_LOG(ERR, "Failed to allocate CQ door-bell.");
                goto error;
        }
        rxq_ctrl->cq_dbr_offset = dbr_offset;
        rxq_ctrl->cq_dbrec_page = dbr_page;
        rxq_data->cq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
                          (uintptr_t)rxq_ctrl->cq_dbr_offset);
        rxq_data->cq_uar =
                        mlx5_os_get_devx_uar_base_addr(priv->sh->devx_rx_uar);
        /* Create CQ using DevX API. */
        cq_attr.uar_page_id =
                        mlx5_os_get_devx_uar_page_id(priv->sh->devx_rx_uar);
        cq_attr.q_umem_id = mlx5_os_get_umem_id(rxq_ctrl->cq_umem);
        cq_attr.q_umem_valid = 1;
        cq_attr.log_cq_size = log_cqe_n;
        cq_attr.log_page_size = rte_log2_u32(page_size);
        cq_attr.db_umem_offset = rxq_ctrl->cq_dbr_offset;
        cq_attr.db_umem_id = mlx5_os_get_umem_id(dbr_page->umem);
        cq_attr.db_umem_valid = 1;
        cq_obj = mlx5_devx_cmd_create_cq(priv->sh->ctx, &cq_attr);
        if (!cq_obj)
                goto error;
        rxq_data->cqe_n = log_cqe_n;
        rxq_data->cqn = cq_obj->id;
        if (rxq_ctrl->obj->devx_channel) {
                ret = mlx5_glue->devx_subscribe_devx_event
                                                (rxq_ctrl->obj->devx_channel,
                                                 cq_obj->obj,
                                                 sizeof(event_nums),
                                                 event_nums,
                                                 (uint64_t)(uintptr_t)cq_obj);
                if (ret) {
                        DRV_LOG(ERR, "Fail to subscribe CQ to event channel.");
                        rte_errno = errno;
                        goto error;
                }
        }
        /* Initialise CQ to 1's to mark HW ownership for all CQEs. */
        memset((void *)(uintptr_t)rxq_data->cqes, 0xFF, cq_size);
        return cq_obj;
error:
        if (cq_obj)
                mlx5_devx_cmd_destroy(cq_obj);
        rxq_release_devx_cq_resources(rxq_ctrl);
        return NULL;
}
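/*
 * Sizing example (illustrative): with 1024 CQEs and no doubling for
 * compression, log_cqe_n = 10 and the CQ buffer is
 * sizeof(struct mlx5_cqe) * 1024 = 64B * 1024 = 64KiB, page aligned for umem
 * registration. The trailing memset to 0xFF hands every CQE to HW ownership
 * so the first poll does not read stale completions.
 */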
/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_devx_create_rq_attr attr = { 0 };
        struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
        uint32_t max_wq_data;

        MLX5_ASSERT(rxq_data);
        MLX5_ASSERT(tmpl);
        tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
        tmpl->rxq_ctrl = rxq_ctrl;
        attr.hairpin = 1;
        max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
        /* Jumbo frames > 9KB should be supported, and more packets. */
        if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
                if (priv->config.log_hp_size > max_wq_data) {
                        DRV_LOG(ERR, "Total data size %u power of 2 is "
                                "too large for hairpin.",
                                priv->config.log_hp_size);
                        rte_errno = ERANGE;
                        return -rte_errno;
                }
                attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
        } else {
                attr.wq_attr.log_hairpin_data_sz =
                                (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
                                 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
        }
        /* Set the packets number to the maximum value for performance. */
        attr.wq_attr.log_hairpin_num_packets =
                        attr.wq_attr.log_hairpin_data_sz -
                        MLX5_HAIRPIN_QUEUE_STRIDE;
        tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
                                           rxq_ctrl->socket);
        if (!tmpl->rq) {
                DRV_LOG(ERR,
                        "Port %u Rx hairpin queue %u can't create RQ object.",
                        dev->data->port_id, idx);
                rte_errno = errno;
                return -rte_errno;
        }
        dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
        return 0;
}
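/*
 * Sizing note (illustrative): log_hairpin_data_sz is a log2 value, so a value
 * of 16, for instance, means 64KiB of queued hairpin data, and
 * log_hairpin_num_packets is derived from it by subtracting
 * MLX5_HAIRPIN_QUEUE_STRIDE, i.e. one packet slot per 2^STRIDE bytes of the
 * buffer. The upper bound comes from the HCA capability
 * log_max_hairpin_wq_data_sz queried at probe time.
 */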
/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
        int ret = 0;

        MLX5_ASSERT(rxq_data);
        MLX5_ASSERT(tmpl);
        if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
                return mlx5_rxq_obj_hairpin_new(dev, idx);
        tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_RQ;
        tmpl->rxq_ctrl = rxq_ctrl;
        if (rxq_ctrl->irq) {
                int devx_ev_flag =
                        MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

                tmpl->devx_channel = mlx5_glue->devx_create_event_channel
                                                                (priv->sh->ctx,
                                                                 devx_ev_flag);
                if (!tmpl->devx_channel) {
                        rte_errno = errno;
                        DRV_LOG(ERR, "Failed to create event channel %d.",
                                rte_errno);
                        goto error;
                }
                tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
        }
        /* Create CQ using DevX API. */
        tmpl->devx_cq = rxq_create_devx_cq_resources(dev, idx);
        if (!tmpl->devx_cq) {
                DRV_LOG(ERR, "Failed to create CQ.");
                goto error;
        }
        /* Create RQ using DevX API. */
        tmpl->rq = rxq_create_devx_rq_resources(dev, idx);
        if (!tmpl->rq) {
                DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
                        dev->data->port_id, idx);
                rte_errno = ENOMEM;
                goto error;
        }
        /* Change queue state to ready. */
        ret = mlx5_devx_modify_rq(tmpl, true);
        if (ret)
                goto error;
        rxq_data->cq_arm_sn = 0;
        mlx5_rxq_initialize(rxq_data);
        dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        rxq_ctrl->wqn = tmpl->rq->id;
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        if (tmpl->rq)
                claim_zero(mlx5_devx_cmd_destroy(tmpl->rq));
        if (tmpl->devx_cq)
                claim_zero(mlx5_devx_cmd_destroy(tmpl->devx_cq));
        if (tmpl->devx_channel)
                mlx5_glue->devx_destroy_event_channel(tmpl->devx_channel);
        rxq_release_devx_rq_resources(rxq_ctrl);
        rxq_release_devx_cq_resources(rxq_ctrl);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}
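/*
 * Creation order note: the event channel (only when the queue is configured
 * with an interrupt) must exist before the CQ so the CQ can be subscribed to
 * it, the CQ must exist before the RQ because the RQ attributes reference its
 * cqn, and only then is the RQ moved RST -> RDY. The error path mirrors this
 * in reverse, reusing the release helpers defined earlier in this file.
 */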
/**
 * Create an indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param queues
 *   Queues entering in the indirection table.
 * @param queues_n
 *   Number of queues in the array.
 *
 * @return
 *   The DevX object initialized, NULL otherwise and rte_errno is set.
 */
static struct mlx5_ind_table_obj *
mlx5_devx_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
                            uint32_t queues_n)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ind_table_obj *ind_tbl;
        struct mlx5_devx_rqt_attr *rqt_attr = NULL;
        const unsigned int rqt_n = 1 << (rte_is_power_of_2(queues_n) ?
                                log2above(queues_n) :
                                log2above(priv->config.ind_table_max_size));
        unsigned int i = 0, j = 0, k = 0;

        ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
                              queues_n * sizeof(uint16_t), 0, SOCKET_ID_ANY);
        if (!ind_tbl) {
                rte_errno = ENOMEM;
                return NULL;
        }
        rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
                               rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
        if (!rqt_attr) {
                DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
                        dev->data->port_id);
                rte_errno = ENOMEM;
                goto error;
        }
        rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
        rqt_attr->rqt_actual_size = rqt_n;
        for (i = 0; i != queues_n; ++i) {
                struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);

                if (!rxq)
                        goto error;
                rqt_attr->rq_list[i] = rxq->obj->rq->id;
                ind_tbl->queues[i] = queues[i];
        }
        k = i; /* Retain value of i for use in error case. */
        for (j = 0; k != rqt_n; ++k, ++j)
                rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
        ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
        mlx5_free(rqt_attr);
        if (!ind_tbl->rqt) {
                DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
                        dev->data->port_id);
                rte_errno = errno;
                goto error;
        }
        ind_tbl->queues_n = queues_n;
        rte_atomic32_inc(&ind_tbl->refcnt);
        LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
        return ind_tbl;
error:
        for (j = 0; j < i; j++)
                mlx5_rxq_release(dev, ind_tbl->queues[j]);
        mlx5_free(ind_tbl);
        DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
        return NULL;
}
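/*
 * Fill-out example (illustrative, assuming ind_table_max_size is 512): for
 * queues_n = 6, which is not a power of two, rqt_n becomes 512 and the wrap
 * loop above repeats the six real RQ ids across the rest of the table
 * (rq_list[6] = rq_list[0], rq_list[7] = rq_list[1], ...), so RSS spreads
 * over all 512 entries while only six queues are referenced.
 */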
/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_obj_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
        claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}
/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param rss_key_len
 *   RSS key length.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param queues
 *   Queues entering in hash queue. In case of empty hash_fields only the
 *   first queue index will be taken for the indirection table.
 * @param queues_n
 *   Number of queues.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   The DevX object initialized index, 0 otherwise and rte_errno is set.
 */
static uint32_t
mlx5_devx_hrxq_new(struct rte_eth_dev *dev,
                   const uint8_t *rss_key, uint32_t rss_key_len,
                   uint64_t hash_fields,
                   const uint16_t *queues, uint32_t queues_n,
                   int tunnel __rte_unused)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_hrxq *hrxq = NULL;
        uint32_t hrxq_idx = 0;
        struct mlx5_ind_table_obj *ind_tbl;
        struct mlx5_devx_obj *tir = NULL;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_devx_tir_attr tir_attr;
        bool lro = true;
        uint32_t i;
        int err;

        queues_n = hash_fields ? queues_n : 1;
        ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
        if (!ind_tbl)
                ind_tbl = priv->obj_ops->ind_table_obj_new(dev, queues,
                                                           queues_n);
        if (!ind_tbl) {
                rte_errno = ENOMEM;
                return 0;
        }
        /* Enable TIR LRO only if all the queues were configured for. */
        for (i = 0; i < queues_n; ++i) {
                if (!(*priv->rxqs)[queues[i]]->lro) {
                        lro = false;
                        break;
                }
        }
        memset(&tir_attr, 0, sizeof(tir_attr));
        tir_attr.disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
        tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
        tir_attr.tunneled_offload_en = !!tunnel;
        /* If needed, translate hash_fields bitmap to PRM format. */
        if (hash_fields) {
                struct mlx5_rx_hash_field_select *rx_hash_field_select = NULL;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
                rx_hash_field_select = hash_fields & IBV_RX_HASH_INNER ?
                                &tir_attr.rx_hash_field_selector_inner :
                                &tir_attr.rx_hash_field_selector_outer;
#else
                rx_hash_field_select = &tir_attr.rx_hash_field_selector_outer;
#endif
                /* 1 bit: 0: IPv4, 1: IPv6. */
                rx_hash_field_select->l3_prot_type =
                        !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
                /* 1 bit: 0: TCP, 1: UDP. */
                rx_hash_field_select->l4_prot_type =
                        !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
                /* Bitmask which sets which fields to use in RX Hash. */
                rx_hash_field_select->selected_fields =
                        ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
                        (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
                        (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
                        (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
        }
        if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
                tir_attr.transport_domain = priv->sh->td->id;
        else
                tir_attr.transport_domain = priv->sh->tdn;
        memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
        tir_attr.indirect_table = ind_tbl->rqt->id;
        if (dev->data->dev_conf.lpbk_mode)
                tir_attr.self_lb_block = MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
        if (lro) {
                tir_attr.lro_timeout_period_usecs = priv->config.lro.timeout;
                tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
                tir_attr.lro_enable_mask = MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                                           MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
        }
        tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
        if (!tir) {
                DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
                        dev->data->port_id);
                rte_errno = errno;
                goto error;
        }
        hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
        if (!hrxq)
                goto error;
        hrxq->ind_table = ind_tbl;
        hrxq->tir = tir;
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        hrxq->action = mlx5_glue->dv_create_flow_action_dest_devx_tir
                                                        (hrxq->tir->obj);
        if (!hrxq->action)
                goto error;
#endif
        hrxq->rss_key_len = rss_key_len;
        hrxq->hash_fields = hash_fields;
        memcpy(hrxq->rss_key, rss_key, rss_key_len);
        rte_atomic32_inc(&hrxq->refcnt);
        ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
                     hrxq, next);
        return hrxq_idx;
error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        mlx5_ind_table_obj_release(dev, ind_tbl);
        if (tir)
                claim_zero(mlx5_devx_cmd_destroy(tir));
        if (hrxq)
                mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
        rte_errno = err; /* Restore rte_errno. */
        return 0;
}
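/*
 * Hash-field translation example (illustrative): for a plain IPv4/TCP RSS
 * request (source and destination IP plus source and destination port, no
 * IBV_RX_HASH_INNER) the outer selector gets l3_prot_type = 0 (IPv4),
 * l4_prot_type = 0 (TCP) and selected_fields with the SRC_IP, DST_IP,
 * L4_SPORT and L4_DPORT bits set, which is the PRM encoding the TIR expects.
 */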
/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue whose TIR is released.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
        claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}
struct mlx5_obj_ops devx_obj_ops = {
        .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
        .rxq_obj_new = mlx5_rxq_devx_obj_new,
        .rxq_event_get = mlx5_rx_devx_get_event,
        .rxq_obj_modify = mlx5_devx_modify_rq,
        .rxq_obj_release = mlx5_rxq_devx_obj_release,
        .ind_table_obj_new = mlx5_devx_ind_table_obj_new,
        .ind_table_obj_destroy = mlx5_devx_ind_table_obj_destroy,
        .hrxq_new = mlx5_devx_hrxq_new,
        .hrxq_destroy = mlx5_devx_tir_destroy,
};
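/*
 * Usage sketch (hedged, the selection logic itself lives outside this file):
 * during device spawn the PMD is expected to point its per-port ops at this
 * table when queue objects are created through DevX, e.g.
 *
 *      priv->obj_ops = &devx_obj_ops;
 *
 * after which generic code dispatches through it, for instance
 * priv->obj_ops->rxq_obj_new(dev, idx) to create a queue or
 * priv->obj_ops->rxq_obj_release(rxq_obj) to tear it down.
 */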