/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stddef.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <sys/queue.h>

#include <rte_malloc.h>
#include <rte_common.h>
#include <rte_eal_paging.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_devx.h>
#include <mlx5_malloc.h>

#include "mlx5.h"
#include "mlx5_common_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_devx.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
/**
 * Modify RQ vlan stripping offload.
 *
 * @param rxq_obj
 *   Rx queue object.
 * @param on
 *   Enable/disable VLAN stripping.
 *
 * @return
 *   0 on success, non-0 otherwise.
 */
static int
mlx5_rxq_obj_modify_rq_vlan_strip(struct mlx5_rxq_obj *rxq_obj, int on)
{
        struct mlx5_devx_modify_rq_attr rq_attr;

        memset(&rq_attr, 0, sizeof(rq_attr));
        rq_attr.rq_state = MLX5_RQC_STATE_RDY;
        rq_attr.state = MLX5_RQC_STATE_RDY;
        rq_attr.vsd = (on ? 0 : 1);
        rq_attr.modify_bitmask = MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD;
        return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}
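/*
 * The PRM field modified above is VSD ("VLAN strip disable"), hence the
 * inverted "on" logic. Illustrative, hypothetical caller (not part of this
 * driver):
 *
 *        if (mlx5_rxq_obj_modify_rq_vlan_strip(rxq_obj, 1))
 *                DRV_LOG(WARNING, "Failed to enable VLAN stripping.");
 */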
/**
 * Modify RQ using DevX API.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 * @param type
 *   Type of queue state change.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_rq(struct mlx5_rxq_obj *rxq_obj, uint8_t type)
{
        struct mlx5_devx_modify_rq_attr rq_attr;

        memset(&rq_attr, 0, sizeof(rq_attr));
        switch (type) {
        case MLX5_RXQ_MOD_ERR2RST:
                rq_attr.rq_state = MLX5_RQC_STATE_ERR;
                rq_attr.state = MLX5_RQC_STATE_RST;
                break;
        case MLX5_RXQ_MOD_RST2RDY:
                rq_attr.rq_state = MLX5_RQC_STATE_RST;
                rq_attr.state = MLX5_RQC_STATE_RDY;
                break;
        case MLX5_RXQ_MOD_RDY2ERR:
                rq_attr.rq_state = MLX5_RQC_STATE_RDY;
                rq_attr.state = MLX5_RQC_STATE_ERR;
                break;
        case MLX5_RXQ_MOD_RDY2RST:
                rq_attr.rq_state = MLX5_RQC_STATE_RDY;
                rq_attr.state = MLX5_RQC_STATE_RST;
                break;
        default:
                break;
        }
        return mlx5_devx_cmd_modify_rq(rxq_obj->rq_obj.rq, &rq_attr);
}
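/*
 * The switch above covers the four RQ state transitions this driver uses:
 * RST -> RDY on start, RDY -> RST on stop, RDY -> ERR on failure and
 * ERR -> RST on recovery. In each case rq_state names the current hardware
 * state and state the target one.
 */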
/**
 * Modify SQ using DevX API.
 *
 * @param obj
 *   DevX Tx queue object.
 * @param type
 *   Type of queue state change.
 * @param dev_port
 *   Unused; kept for signature compatibility (see the note below).
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_modify_sq(struct mlx5_txq_obj *obj, enum mlx5_txq_modify_type type,
                    uint8_t dev_port)
{
        struct mlx5_devx_modify_sq_attr msq_attr = { 0 };
        int ret;

        if (type != MLX5_TXQ_MOD_RST2RDY) {
                /* Change queue state to reset. */
                if (type == MLX5_TXQ_MOD_ERR2RDY)
                        msq_attr.sq_state = MLX5_SQC_STATE_ERR;
                else
                        msq_attr.sq_state = MLX5_SQC_STATE_RDY;
                msq_attr.state = MLX5_SQC_STATE_RST;
                ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
                if (ret) {
                        DRV_LOG(ERR, "Cannot change the Tx SQ state to RESET"
                                " %s", strerror(errno));
                        rte_errno = errno;
                        return ret;
                }
        }
        if (type != MLX5_TXQ_MOD_RDY2RST) {
                /* Change queue state to ready. */
                msq_attr.sq_state = MLX5_SQC_STATE_RST;
                msq_attr.state = MLX5_SQC_STATE_RDY;
                ret = mlx5_devx_cmd_modify_sq(obj->sq_obj.sq, &msq_attr);
                if (ret) {
                        DRV_LOG(ERR, "Cannot change the Tx SQ state to READY"
                                " %s", strerror(errno));
                        rte_errno = errno;
                        return ret;
                }
        }
        /*
         * The dev_port argument is used only by the Verbs implementation;
         * this function and its Verbs counterpart are reached through the
         * same function pointer, so both must keep identical signatures.
         */
        (void)dev_port;
        return 0;
}
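/*
 * Sketch of the transition logic above: every change is routed through RST,
 * i.e. <state> -> RST -> RDY. MLX5_TXQ_MOD_RST2RDY skips the first leg,
 * MLX5_TXQ_MOD_RDY2RST skips the second, and MLX5_TXQ_MOD_ERR2RDY performs
 * both starting from the error state.
 */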
/**
 * Destroy the Rx queue DevX object.
 *
 * @param rxq_obj
 *   Rxq object to destroy.
 */
static void
mlx5_rxq_release_devx_resources(struct mlx5_rxq_obj *rxq_obj)
{
        mlx5_devx_rq_destroy(&rxq_obj->rq_obj);
        memset(&rxq_obj->rq_obj, 0, sizeof(rxq_obj->rq_obj));
        mlx5_devx_cq_destroy(&rxq_obj->cq_obj);
        memset(&rxq_obj->cq_obj, 0, sizeof(rxq_obj->cq_obj));
}
/**
 * Release an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 */
static void
mlx5_rxq_devx_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
        MLX5_ASSERT(rxq_obj);
        if (rxq_obj->rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN) {
                MLX5_ASSERT(rxq_obj->rq);
                mlx5_devx_modify_rq(rxq_obj, MLX5_RXQ_MOD_RDY2RST);
                claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
        } else {
                MLX5_ASSERT(rxq_obj->cq_obj.cq);
                MLX5_ASSERT(rxq_obj->rq_obj.rq);
                mlx5_rxq_release_devx_resources(rxq_obj);
                if (rxq_obj->devx_channel)
                        mlx5_os_devx_destroy_event_channel
                                                (rxq_obj->devx_channel);
        }
}
/**
 * Get event for an Rx DevX queue object.
 *
 * @param rxq_obj
 *   DevX Rx queue object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rx_devx_get_event(struct mlx5_rxq_obj *rxq_obj)
{
#ifdef HAVE_IBV_DEVX_EVENT
        union {
                struct mlx5dv_devx_async_event_hdr event_resp;
                uint8_t buf[sizeof(struct mlx5dv_devx_async_event_hdr) + 128];
        } out;
        int ret = mlx5_glue->devx_get_event(rxq_obj->devx_channel,
                                            &out.event_resp,
                                            sizeof(out.buf));

        if (ret < 0) {
                rte_errno = errno;
                return -rte_errno;
        }
        if (out.event_resp.cookie != (uint64_t)(uintptr_t)rxq_obj->cq_obj.cq) {
                rte_errno = EINVAL;
                return -rte_errno;
        }
        return 0;
#else
        (void)rxq_obj;
        rte_errno = ENOTSUP;
        return -rte_errno;
#endif /* HAVE_IBV_DEVX_EVENT */
}
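/*
 * The 128 extra bytes in the union above are headroom for the event payload
 * that follows struct mlx5dv_devx_async_event_hdr. The cookie comparison
 * works because the CQ pointer itself is registered as the cookie when the
 * CQ is subscribed to the event channel (see
 * mlx5_rxq_create_devx_cq_resources() below).
 */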
/**
 * Create a RQ object using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_rq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_devx_create_rq_attr rq_attr = { 0 };
        uint16_t log_desc_n = rxq_data->elts_n - rxq_data->sges_n;
        uint32_t wqe_size, log_wqe_size;

        /* Fill RQ attributes. */
        rq_attr.mem_rq_type = MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE;
        rq_attr.flush_in_error_en = 1;
        rq_attr.vsd = (rxq_data->vlan_strip) ? 0 : 1;
        rq_attr.cqn = rxq_ctrl->obj->cq_obj.cq->id;
        rq_attr.scatter_fcs = (rxq_data->crc_present) ? 1 : 0;
        /* Fill WQ attributes for this RQ. */
        if (mlx5_rxq_mprq_enabled(rxq_data)) {
                rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC_STRIDING_RQ;
                /*
                 * Number of strides in each WQE:
                 * 512*2^single_wqe_log_num_of_strides.
                 */
                rq_attr.wq_attr.single_wqe_log_num_of_strides =
                                rxq_data->strd_num_n -
                                MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
                /* Stride size = (2^single_stride_log_num_of_bytes)*64B. */
                rq_attr.wq_attr.single_stride_log_num_of_bytes =
                                rxq_data->strd_sz_n -
                                MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
                wqe_size = sizeof(struct mlx5_wqe_mprq);
        } else {
                rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
                wqe_size = sizeof(struct mlx5_wqe_data_seg);
        }
        log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
        wqe_size = 1 << log_wqe_size; /* Round up to a power of two. */
        rq_attr.wq_attr.log_wq_stride = log_wqe_size;
        rq_attr.wq_attr.log_wq_sz = log_desc_n;
        rq_attr.wq_attr.end_padding_mode = priv->config.hw_padding ?
                                           MLX5_WQ_END_PAD_MODE_ALIGN :
                                           MLX5_WQ_END_PAD_MODE_NONE;
        rq_attr.wq_attr.pd = priv->sh->pdn;
        rq_attr.counter_set_id = priv->counter_set_id;
        /* Create RQ using DevX API. */
        return mlx5_devx_rq_create(priv->sh->ctx, &rxq_ctrl->obj->rq_obj,
                                   wqe_size, log_desc_n, &rq_attr,
                                   rxq_ctrl->socket);
}
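/*
 * Worked example for the non-MPRQ sizing above (illustrative numbers):
 * sizeof(struct mlx5_wqe_data_seg) is 16, so with sges_n = 2 the stride is
 * log_wqe_size = log2above(16) + 2 = 6, i.e. a 64B WQE holding four scatter
 * entries, while the WQ depth log is elts_n - sges_n because every element
 * consumes one scatter entry.
 */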
/**
 * Create a DevX CQ object for an Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_create_devx_cq_resources(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_devx_cq *cq_obj = NULL;
        struct mlx5_devx_cq_attr cq_attr = { 0 };
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        unsigned int cqe_n = mlx5_rxq_cqe_num(rxq_data);
        uint32_t log_cqe_n;
        uint16_t event_nums[1] = { 0 };
        int ret = 0;

        if (priv->config.cqe_comp && !rxq_data->hw_timestamp &&
            !rxq_data->lro) {
                cq_attr.cqe_comp_en = 1u;
                rxq_data->mcqe_format = priv->config.cqe_comp_fmt;
                rxq_data->byte_mask = UINT32_MAX;
                switch (priv->config.cqe_comp_fmt) {
                case MLX5_CQE_RESP_FORMAT_HASH:
                        /* fallthrough */
                case MLX5_CQE_RESP_FORMAT_CSUM:
                        /*
                         * Select CSUM miniCQE format only for non-vectorized
                         * MPRQ Rx burst, use HASH miniCQE format for others.
                         */
                        if (mlx5_rxq_check_vec_support(rxq_data) < 0 &&
                            mlx5_rxq_mprq_enabled(rxq_data))
                                cq_attr.mini_cqe_res_format =
                                        MLX5_CQE_RESP_FORMAT_CSUM_STRIDX;
                        else
                                cq_attr.mini_cqe_res_format =
                                        MLX5_CQE_RESP_FORMAT_HASH;
                        rxq_data->mcqe_format = cq_attr.mini_cqe_res_format;
                        break;
                case MLX5_CQE_RESP_FORMAT_FTAG_STRIDX:
                        rxq_data->byte_mask = MLX5_LEN_WITH_MARK_MASK;
                        /* fallthrough */
                case MLX5_CQE_RESP_FORMAT_CSUM_STRIDX:
                        cq_attr.mini_cqe_res_format = priv->config.cqe_comp_fmt;
                        break;
                case MLX5_CQE_RESP_FORMAT_L34H_STRIDX:
                        cq_attr.mini_cqe_res_format = 0;
                        cq_attr.mini_cqe_res_format_ext = 1;
                        break;
                }
                DRV_LOG(DEBUG,
                        "Port %u Rx CQE compression is enabled, format %d.",
                        dev->data->port_id, priv->config.cqe_comp_fmt);
                /*
                 * For vectorized Rx the CQE count must not be doubled,
                 * to keep cq_ci and rq_ci aligned.
                 */
                if (mlx5_rxq_check_vec_support(rxq_data) < 0)
                        cqe_n *= 2;
        } else if (priv->config.cqe_comp && rxq_data->hw_timestamp) {
                DRV_LOG(DEBUG,
                        "Port %u Rx CQE compression is disabled for HW"
                        " timestamp.",
                        dev->data->port_id);
        } else if (priv->config.cqe_comp && rxq_data->lro) {
                DRV_LOG(DEBUG,
                        "Port %u Rx CQE compression is disabled for LRO.",
                        dev->data->port_id);
        }
        cq_attr.uar_page_id = mlx5_os_get_devx_uar_page_id(sh->devx_rx_uar);
        log_cqe_n = log2above(cqe_n);
        /* Create CQ using DevX API. */
        ret = mlx5_devx_cq_create(sh->ctx, &rxq_ctrl->obj->cq_obj, log_cqe_n,
                                  &cq_attr, sh->numa_node);
        if (ret)
                return ret;
        cq_obj = &rxq_ctrl->obj->cq_obj;
        rxq_data->cqes = (volatile struct mlx5_cqe (*)[])
                                                (uintptr_t)cq_obj->cqes;
        rxq_data->cq_db = cq_obj->db_rec;
        rxq_data->cq_uar = mlx5_os_get_devx_uar_base_addr(sh->devx_rx_uar);
        rxq_data->cqe_n = log_cqe_n;
        rxq_data->cqn = cq_obj->cq->id;
        if (rxq_ctrl->obj->devx_channel) {
                ret = mlx5_os_devx_subscribe_devx_event
                                        (rxq_ctrl->obj->devx_channel,
                                         cq_obj->cq->obj,
                                         sizeof(event_nums),
                                         event_nums,
                                         (uint64_t)(uintptr_t)cq_obj->cq);
                if (ret) {
                        DRV_LOG(ERR, "Failed to subscribe CQ to event channel.");
                        ret = errno;
                        mlx5_devx_cq_destroy(cq_obj);
                        memset(cq_obj, 0, sizeof(*cq_obj));
                        rte_errno = ret;
                        return -ret;
                }
        }
        return 0;
}
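/*
 * Background for the format selection above: with CQE compression the device
 * emits arrays of compressed "miniCQEs", and the chosen format decides which
 * per-packet fields (hash result, checksum, flow tag, stride index, L3/L4
 * header info) survive compression. The Rx burst routine parses miniCQEs
 * according to rxq_data->mcqe_format recorded here.
 */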
/**
 * Create the Rx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_devx_create_rq_attr attr = { 0 };
        struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
        uint32_t max_wq_data;

        MLX5_ASSERT(rxq_data);
        MLX5_ASSERT(tmpl);
        tmpl->rxq_ctrl = rxq_ctrl;
        attr.hairpin = 1;
        max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
        /* Jumbo frames > 9KB should be supported, as should more packets. */
        if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
                if (priv->config.log_hp_size > max_wq_data) {
                        DRV_LOG(ERR, "Total data size %u power of 2 is "
                                "too large for hairpin.",
                                priv->config.log_hp_size);
                        rte_errno = ERANGE;
                        return -rte_errno;
                }
                attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
        } else {
                attr.wq_attr.log_hairpin_data_sz =
                                (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
                                 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
        }
        /* Set the number of packets to the maximum value for performance. */
        attr.wq_attr.log_hairpin_num_packets =
                        attr.wq_attr.log_hairpin_data_sz -
                        MLX5_HAIRPIN_QUEUE_STRIDE;
        attr.counter_set_id = priv->counter_set_id;
        tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
                                           rxq_ctrl->socket);
        if (!tmpl->rq) {
                DRV_LOG(ERR,
                        "Port %u Rx hairpin queue %u cannot create RQ object.",
                        dev->data->port_id, idx);
                rte_errno = errno;
                return -rte_errno;
        }
        dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
        return 0;
}
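/*
 * Sizing recap for the hairpin attributes above: log_hairpin_data_sz is the
 * log2 of the hairpin buffer size, capped by the HCA capability
 * (log_max_hairpin_wq_data_sz), and log_hairpin_num_packets is derived from
 * it by subtracting the per-packet stride log (MLX5_HAIRPIN_QUEUE_STRIDE),
 * i.e. packets = data_size / stride. The Tx hairpin path below uses the
 * same scheme.
 */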
/**
 * Create the Rx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Rx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_rxq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        struct mlx5_rxq_obj *tmpl = rxq_ctrl->obj;
        int ret = 0;

        MLX5_ASSERT(rxq_data);
        MLX5_ASSERT(tmpl);
        if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
                return mlx5_rxq_obj_hairpin_new(dev, idx);
        tmpl->rxq_ctrl = rxq_ctrl;
        if (rxq_ctrl->irq) {
                int devx_ev_flag =
                        MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA;

                tmpl->devx_channel = mlx5_os_devx_create_event_channel
                                                        (priv->sh->ctx,
                                                         devx_ev_flag);
                if (!tmpl->devx_channel) {
                        rte_errno = errno;
                        DRV_LOG(ERR, "Failed to create event channel %d.",
                                rte_errno);
                        goto error;
                }
                tmpl->fd = mlx5_os_get_devx_channel_fd(tmpl->devx_channel);
        }
        /* Create CQ using DevX API. */
        ret = mlx5_rxq_create_devx_cq_resources(dev, idx);
        if (ret) {
                DRV_LOG(ERR, "Failed to create CQ.");
                goto error;
        }
        /* Create RQ using DevX API. */
        ret = mlx5_rxq_create_devx_rq_resources(dev, idx);
        if (ret) {
                DRV_LOG(ERR, "Port %u Rx queue %u RQ creation failure.",
                        dev->data->port_id, idx);
                rte_errno = ENOMEM;
                goto error;
        }
        /* Change queue state to ready. */
        ret = mlx5_devx_modify_rq(tmpl, MLX5_RXQ_MOD_RST2RDY);
        if (ret)
                goto error;
        rxq_data->wqes = (void *)(uintptr_t)tmpl->rq_obj.umem_buf;
        rxq_data->rq_db = (uint32_t *)(uintptr_t)tmpl->rq_obj.db_rec;
        rxq_data->cq_arm_sn = 0;
        rxq_data->cq_ci = 0;
        mlx5_rxq_initialize(rxq_data);
        dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        rxq_ctrl->wqn = tmpl->rq_obj.rq->id;
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        mlx5_rxq_devx_obj_release(tmpl);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
}
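/*
 * Creation order in the function above matters: the event channel (when
 * IRQs are used) must exist before the CQ subscribes to it, the CQ must
 * exist before the RQ that references its cqn, and the RQ is moved
 * RST -> RDY only after both are in place. The single error path releases
 * whatever was created via mlx5_rxq_devx_obj_release().
 */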
/**
 * Prepare RQT attribute structure for DevX RQT API.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices.
 * @param queues_n
 *   Size of the @p queues array.
 *
 * @return
 *   The initialized RQT attr object, NULL otherwise and rte_errno is set.
 */
static struct mlx5_devx_rqt_attr *
mlx5_devx_ind_table_create_rqt_attr(struct rte_eth_dev *dev,
                                    const unsigned int log_n,
                                    const uint16_t *queues,
                                    const uint32_t queues_n)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_devx_rqt_attr *rqt_attr = NULL;
        const unsigned int rqt_n = 1 << log_n;
        unsigned int i, j;

        rqt_attr = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*rqt_attr) +
                               rqt_n * sizeof(uint32_t), 0, SOCKET_ID_ANY);
        if (!rqt_attr) {
                DRV_LOG(ERR, "Port %u cannot allocate RQT resources.",
                        dev->data->port_id);
                rte_errno = ENOMEM;
                return NULL;
        }
        rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
        rqt_attr->rqt_actual_size = rqt_n;
        for (i = 0; i != queues_n; ++i) {
                struct mlx5_rxq_data *rxq = (*priv->rxqs)[queues[i]];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of(rxq, struct mlx5_rxq_ctrl, rxq);

                rqt_attr->rq_list[i] = rxq_ctrl->obj->rq_obj.rq->id;
        }
        MLX5_ASSERT(i > 0);
        for (j = 0; i != rqt_n; ++j, ++i)
                rqt_attr->rq_list[i] = rqt_attr->rq_list[j];
        return rqt_attr;
}
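/*
 * Example of the wrap-around fill above (hypothetical queues): with
 * log_n = 2 (rqt_n = 4) and queues_n = 3 mapping to RQ ids {7, 9, 11},
 * the resulting rq_list is {7, 9, 11, 7} - the tail of the table is padded
 * by cycling over the beginning of the list.
 */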
/**
 * Create RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_new(struct rte_eth_dev *dev, const unsigned int log_n,
                        struct mlx5_ind_table_obj *ind_tbl)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_devx_rqt_attr *rqt_attr = NULL;

        MLX5_ASSERT(ind_tbl);
        rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
                                                       ind_tbl->queues,
                                                       ind_tbl->queues_n);
        if (!rqt_attr)
                return -rte_errno;
        ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx, rqt_attr);
        mlx5_free(rqt_attr);
        if (!ind_tbl->rqt) {
                DRV_LOG(ERR, "Port %u cannot create DevX RQT.",
                        dev->data->port_id);
                rte_errno = errno;
                return -rte_errno;
        }
        return 0;
}
/**
 * Modify RQT using DevX API as a field of indirection table.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param log_n
 *   Log of number of queues in the array.
 * @param queues
 *   List of Rx queue indices.
 * @param queues_n
 *   Size of the @p queues array.
 * @param ind_tbl
 *   DevX indirection table object.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_ind_table_modify(struct rte_eth_dev *dev, const unsigned int log_n,
                           const uint16_t *queues, const uint32_t queues_n,
                           struct mlx5_ind_table_obj *ind_tbl)
{
        int ret = 0;
        struct mlx5_devx_rqt_attr *rqt_attr = NULL;

        MLX5_ASSERT(ind_tbl);
        rqt_attr = mlx5_devx_ind_table_create_rqt_attr(dev, log_n,
                                                       queues,
                                                       queues_n);
        if (!rqt_attr)
                return -rte_errno;
        ret = mlx5_devx_cmd_modify_rqt(ind_tbl->rqt, rqt_attr);
        mlx5_free(rqt_attr);
        if (ret)
                DRV_LOG(ERR, "Port %u cannot modify DevX RQT.",
                        dev->data->port_id);
        return ret;
}
/**
 * Destroy the DevX RQT object.
 *
 * @param ind_tbl
 *   Indirection table to release.
 */
static void
mlx5_devx_ind_table_destroy(struct mlx5_ind_table_obj *ind_tbl)
{
        claim_zero(mlx5_devx_cmd_destroy(ind_tbl->rqt));
}
/**
 * Set TIR attribute struct with relevant input values.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] rss_key
 *   RSS key for the Rx hash queue.
 * @param[in] hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 * @param[in] tunnel
 *   Tunnel type.
 * @param[out] tir_attr
 *   Parameters structure for TIR creation/modification.
 */
static void
mlx5_devx_tir_attr_set(struct rte_eth_dev *dev, const uint8_t *rss_key,
                       uint64_t hash_fields,
                       const struct mlx5_ind_table_obj *ind_tbl,
                       int tunnel, struct mlx5_devx_tir_attr *tir_attr)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[ind_tbl->queues[0]];
        struct mlx5_rxq_ctrl *rxq_ctrl =
                container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
        enum mlx5_rxq_type rxq_obj_type = rxq_ctrl->type;
        bool lro = true;
        uint32_t i;

        /* Enable TIR LRO only if all the queues were configured for it. */
        for (i = 0; i < ind_tbl->queues_n; ++i) {
                if (!(*priv->rxqs)[ind_tbl->queues[i]]->lro) {
                        lro = false;
                        break;
                }
        }
        memset(tir_attr, 0, sizeof(*tir_attr));
        tir_attr->disp_type = MLX5_TIRC_DISP_TYPE_INDIRECT;
        tir_attr->rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
        tir_attr->tunneled_offload_en = !!tunnel;
        /* If needed, translate hash_fields bitmap to PRM format. */
        if (hash_fields) {
                struct mlx5_rx_hash_field_select *rx_hash_field_select =
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
                        hash_fields & IBV_RX_HASH_INNER ?
                                &tir_attr->rx_hash_field_selector_inner :
#endif
                                &tir_attr->rx_hash_field_selector_outer;
                /* 1 bit: 0: IPv4, 1: IPv6. */
                rx_hash_field_select->l3_prot_type =
                        !!(hash_fields & MLX5_IPV6_IBV_RX_HASH);
                /* 1 bit: 0: TCP, 1: UDP. */
                rx_hash_field_select->l4_prot_type =
                        !!(hash_fields & MLX5_UDP_IBV_RX_HASH);
                /* Bitmask which sets which fields to use in RX Hash. */
                rx_hash_field_select->selected_fields =
                        ((!!(hash_fields & MLX5_L3_SRC_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
                        (!!(hash_fields & MLX5_L3_DST_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP |
                        (!!(hash_fields & MLX5_L4_SRC_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT |
                        (!!(hash_fields & MLX5_L4_DST_IBV_RX_HASH)) <<
                         MLX5_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT;
        }
        if (rxq_obj_type == MLX5_RXQ_TYPE_HAIRPIN)
                tir_attr->transport_domain = priv->sh->td->id;
        else
                tir_attr->transport_domain = priv->sh->tdn;
        memcpy(tir_attr->rx_hash_toeplitz_key, rss_key, MLX5_RSS_HASH_KEY_LEN);
        tir_attr->indirect_table = ind_tbl->rqt->id;
        if (dev->data->dev_conf.lpbk_mode)
                tir_attr->self_lb_block =
                                MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
        if (lro) {
                tir_attr->lro_timeout_period_usecs = priv->config.lro.timeout;
                tir_attr->lro_max_msg_sz = priv->max_lro_msg_size;
                tir_attr->lro_enable_mask =
                                MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
                                MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO;
        }
}
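/*
 * Encoding example for the translation above (hypothetical input): for
 * hash_fields selecting IPv4 source/destination plus TCP ports,
 * l3_prot_type = 0 (IPv4), l4_prot_type = 0 (TCP) and selected_fields has
 * the SRC_IP, DST_IP, L4_SPORT and L4_DPORT bits set.
 */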
/**
 * Create an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Rx Hash queue.
 * @param tunnel
 *   Tunnel type.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_new(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                   int tunnel __rte_unused)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_devx_tir_attr tir_attr = {0};
        int err;

        mlx5_devx_tir_attr_set(dev, hrxq->rss_key, hrxq->hash_fields,
                               hrxq->ind_table, tunnel, &tir_attr);
        hrxq->tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);
        if (!hrxq->tir) {
                DRV_LOG(ERR, "Port %u cannot create DevX TIR.",
                        dev->data->port_id);
                rte_errno = errno;
                goto error;
        }
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        if (mlx5_flow_os_create_flow_action_dest_devx_tir(hrxq->tir,
                                                          &hrxq->action)) {
                rte_errno = errno;
                goto error;
        }
#endif
        return 0;
error:
        err = rte_errno; /* Save rte_errno before cleanup. */
        if (hrxq->tir)
                claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
        rte_errno = err; /* Restore rte_errno. */
        return -rte_errno;
}
/**
 * Destroy a DevX TIR object.
 *
 * @param hrxq
 *   Hash Rx queue to release its TIR.
 */
static void
mlx5_devx_tir_destroy(struct mlx5_hrxq *hrxq)
{
        claim_zero(mlx5_devx_cmd_destroy(hrxq->tir));
}
/**
 * Modify an Rx Hash queue configuration.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param hrxq
 *   Hash Rx queue to modify.
 * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param[in] ind_tbl
 *   Indirection table for TIR.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_hrxq_modify(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq,
                      const uint8_t *rss_key,
                      uint64_t hash_fields,
                      const struct mlx5_ind_table_obj *ind_tbl)
{
        struct mlx5_devx_modify_tir_attr modify_tir = {0};

        /*
         * Untested modification fields:
         * - rx_hash_symmetric is not set in hrxq_new(),
         * - rx_hash_fn is hard-coded in hrxq_new(),
         * - lro_xxx are not set after Rx queue setup.
         */
        if (ind_tbl != hrxq->ind_table)
                modify_tir.modify_bitmask |=
                        MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_INDIRECT_TABLE;
        if (hash_fields != hrxq->hash_fields ||
            memcmp(hrxq->rss_key, rss_key, MLX5_RSS_HASH_KEY_LEN))
                modify_tir.modify_bitmask |=
                        MLX5_MODIFY_TIR_IN_MODIFY_BITMASK_HASH;
        mlx5_devx_tir_attr_set(dev, rss_key, hash_fields, ind_tbl,
                               0, /* N/A - tunnel modification unsupported. */
                               &modify_tir.tir);
        modify_tir.tirn = hrxq->tir->id;
        if (mlx5_devx_cmd_modify_tir(hrxq->tir, &modify_tir)) {
                DRV_LOG(ERR, "Port %u cannot modify DevX TIR.",
                        dev->data->port_id);
                rte_errno = errno;
                return -rte_errno;
        }
        return 0;
}
/**
 * Create a DevX drop action for an Rx Hash queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_devx_drop_action_create(struct rte_eth_dev *dev)
{
        (void)dev;
        DRV_LOG(ERR, "DevX drop action is not supported yet.");
        rte_errno = ENOTSUP;
        return -rte_errno;
}

/**
 * Release a drop hash Rx queue.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
mlx5_devx_drop_action_destroy(struct rte_eth_dev *dev)
{
        (void)dev;
        DRV_LOG(ERR, "DevX drop action is not supported yet.");
        rte_errno = ENOTSUP;
}
/**
 * Create the Tx hairpin queue object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_obj_hairpin_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                container_of(txq_data, struct mlx5_txq_ctrl, txq);
        struct mlx5_devx_create_sq_attr attr = { 0 };
        struct mlx5_txq_obj *tmpl = txq_ctrl->obj;
        uint32_t max_wq_data;

        MLX5_ASSERT(txq_data);
        MLX5_ASSERT(tmpl);
        tmpl->txq_ctrl = txq_ctrl;
        attr.hairpin = 1;
        attr.tis_lst_sz = 1;
        max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
        /* Jumbo frames > 9KB should be supported, as should more packets. */
        if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
                if (priv->config.log_hp_size > max_wq_data) {
                        DRV_LOG(ERR, "Total data size %u power of 2 is "
                                "too large for hairpin.",
                                priv->config.log_hp_size);
                        rte_errno = ERANGE;
                        return -rte_errno;
                }
                attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
        } else {
                attr.wq_attr.log_hairpin_data_sz =
                                (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
                                 max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
        }
        /* Set the number of packets to the maximum value for performance. */
        attr.wq_attr.log_hairpin_num_packets =
                        attr.wq_attr.log_hairpin_data_sz -
                        MLX5_HAIRPIN_QUEUE_STRIDE;
        attr.tis_num = priv->sh->tis->id;
        tmpl->sq = mlx5_devx_cmd_create_sq(priv->sh->ctx, &attr);
        if (!tmpl->sq) {
                DRV_LOG(ERR,
                        "Port %u Tx hairpin queue %u cannot create SQ object.",
                        dev->data->port_id, idx);
                rte_errno = errno;
                return -rte_errno;
        }
        dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_HAIRPIN;
        return 0;
}
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
/**
 * Destroy the Tx queue DevX object.
 *
 * @param txq_obj
 *   Txq object to destroy.
 */
static void
mlx5_txq_release_devx_resources(struct mlx5_txq_obj *txq_obj)
{
        mlx5_devx_sq_destroy(&txq_obj->sq_obj);
        memset(&txq_obj->sq_obj, 0, sizeof(txq_obj->sq_obj));
        mlx5_devx_cq_destroy(&txq_obj->cq_obj);
        memset(&txq_obj->cq_obj, 0, sizeof(txq_obj->cq_obj));
}
/**
 * Create a SQ object and its resources using DevX.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 * @param[in] log_desc_n
 *   Log of number of descriptors in queue.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_txq_create_devx_sq_resources(struct rte_eth_dev *dev, uint16_t idx,
                                  uint16_t log_desc_n)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                        container_of(txq_data, struct mlx5_txq_ctrl, txq);
        struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
        struct mlx5_devx_create_sq_attr sq_attr = {
                .flush_in_error_en = 1,
                .allow_multi_pkt_send_wqe = !!priv->config.mps,
                .min_wqe_inline_mode = priv->config.hca_attr.vport_inline_mode,
                .allow_swp = !!priv->config.swp,
                .cqn = txq_obj->cq_obj.cq->id,
                .tis_lst_sz = 1,
                .tis_num = priv->sh->tis->id,
                .wq_attr = (struct mlx5_devx_wq_attr){
                        .pd = priv->sh->pdn,
                        .uar_page =
                                mlx5_os_get_devx_uar_page_id(priv->sh->tx_uar),
                },
                .ts_format = mlx5_ts_format_conv(priv->sh->sq_ts_format),
        };

        /* Create Send Queue object with DevX. */
        return mlx5_devx_sq_create(priv->sh->ctx, &txq_obj->sq_obj, log_desc_n,
                                   &sq_attr, priv->sh->numa_node);
}
#endif
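/*
 * Note on the SQ attributes above: the SQ is bound to the CQ created earlier
 * (.cqn) and to the per-port TIS (.tis_num); inline and multi-packet WQE
 * behavior follows the device-wide configuration (mps/swp/vport_inline_mode)
 * rather than per-queue settings.
 */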
/**
 * Create the Tx queue DevX object.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param idx
 *   Queue index in DPDK Tx queue array.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_txq_devx_obj_new(struct rte_eth_dev *dev, uint16_t idx)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
        struct mlx5_txq_ctrl *txq_ctrl =
                        container_of(txq_data, struct mlx5_txq_ctrl, txq);

        if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
                return mlx5_txq_obj_hairpin_new(dev, idx);
#if !defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) && defined(HAVE_INFINIBAND_VERBS_H)
        DRV_LOG(ERR, "Port %u Tx queue %u cannot create with DevX, no UAR.",
                dev->data->port_id, idx);
        rte_errno = ENOTSUP;
        return -rte_errno;
#else
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_txq_obj *txq_obj = txq_ctrl->obj;
        struct mlx5_devx_cq_attr cq_attr = {
                .uar_page_id = mlx5_os_get_devx_uar_page_id(sh->tx_uar),
        };
        void *reg_addr;
        uint32_t cqe_n, log_desc_n;
        uint32_t wqe_n, wqe_size;
        int ret = 0;

        MLX5_ASSERT(txq_data);
        MLX5_ASSERT(txq_obj);
        txq_obj->txq_ctrl = txq_ctrl;
        cqe_n = (1UL << txq_data->elts_n) / MLX5_TX_COMP_THRESH +
                1 + MLX5_TX_COMP_THRESH_INLINE_DIV;
        log_desc_n = log2above(cqe_n);
        cqe_n = 1UL << log_desc_n;
        if (cqe_n > UINT16_MAX) {
                DRV_LOG(ERR, "Port %u Tx queue %u requests too many CQEs %u.",
                        dev->data->port_id, txq_data->idx, cqe_n);
                rte_errno = EINVAL;
                return -rte_errno;
        }
        /* Create completion queue object with DevX. */
        ret = mlx5_devx_cq_create(sh->ctx, &txq_obj->cq_obj, log_desc_n,
                                  &cq_attr, priv->sh->numa_node);
        if (ret) {
                DRV_LOG(ERR, "Port %u Tx queue %u CQ creation failure.",
                        dev->data->port_id, idx);
                goto error;
        }
        txq_data->cqe_n = log_desc_n;
        txq_data->cqe_s = cqe_n;
        txq_data->cqe_m = txq_data->cqe_s - 1;
        txq_data->cqes = txq_obj->cq_obj.cqes;
        txq_data->cq_ci = 0;
        txq_data->cq_pi = 0;
        txq_data->cq_db = txq_obj->cq_obj.db_rec;
        *txq_data->cq_db = 0;
        /*
         * Adjust the number of WQEs depending on inline settings.
         * The number of descriptors should be enough to handle
         * the specified number of packets. If the queue is created
         * with Verbs, rdma-core does this queue size adjustment
         * internally in mlx5_calc_sq_size(); do the same here for
         * the queue created with DevX.
         */
        wqe_size = txq_data->tso_en ?
                   RTE_ALIGN(txq_ctrl->max_tso_header, MLX5_WSEG_SIZE) : 0;
        wqe_size += sizeof(struct mlx5_wqe_cseg) +
                    sizeof(struct mlx5_wqe_eseg) +
                    sizeof(struct mlx5_wqe_dseg);
        if (txq_data->inlen_send)
                wqe_size = RTE_MAX(wqe_size, sizeof(struct mlx5_wqe_cseg) +
                                             sizeof(struct mlx5_wqe_eseg) +
                                             RTE_ALIGN(txq_data->inlen_send +
                                                       sizeof(uint32_t),
                                                       MLX5_WSEG_SIZE));
        wqe_size = RTE_ALIGN(wqe_size, MLX5_WQE_SIZE) / MLX5_WQE_SIZE;
        /* Create Send Queue object with DevX. */
        wqe_n = RTE_MIN((1UL << txq_data->elts_n) * wqe_size,
                        (uint32_t)priv->sh->device_attr.max_qp_wr);
        log_desc_n = log2above(wqe_n);
        ret = mlx5_txq_create_devx_sq_resources(dev, idx, log_desc_n);
        if (ret) {
                DRV_LOG(ERR, "Port %u Tx queue %u SQ creation failure.",
                        dev->data->port_id, idx);
                rte_errno = errno;
                goto error;
        }
        /* Create the Work Queue. */
        txq_data->wqe_n = log_desc_n;
        txq_data->wqe_s = 1 << txq_data->wqe_n;
        txq_data->wqe_m = txq_data->wqe_s - 1;
        txq_data->wqes = (struct mlx5_wqe *)(uintptr_t)txq_obj->sq_obj.wqes;
        txq_data->wqes_end = txq_data->wqes + txq_data->wqe_s;
        txq_data->wqe_ci = 0;
        txq_data->wqe_pi = 0;
        txq_data->wqe_comp = 0;
        txq_data->wqe_thres = txq_data->wqe_s / MLX5_TX_COMP_THRESH_INLINE_DIV;
        txq_data->qp_db = txq_obj->sq_obj.db_rec;
        *txq_data->qp_db = 0;
        txq_data->qp_num_8s = txq_obj->sq_obj.sq->id << 8;
        /* Change Send Queue state to Ready-to-Send. */
        ret = mlx5_devx_modify_sq(txq_obj, MLX5_TXQ_MOD_RST2RDY, 0);
        if (ret) {
                rte_errno = errno;
                DRV_LOG(ERR,
                        "Port %u Tx queue %u SQ state to SQC_STATE_RDY failed.",
                        dev->data->port_id, idx);
                goto error;
        }
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        /*
         * When using DevX, query and store the TIS transport domain value
         * once per port; it is used later on Rx when creating a matching TIR.
         */
        if (!priv->sh->tdn)
                priv->sh->tdn = priv->sh->td->id;
#endif
        MLX5_ASSERT(sh->tx_uar);
        reg_addr = mlx5_os_get_devx_uar_reg_addr(sh->tx_uar);
        MLX5_ASSERT(reg_addr);
        txq_ctrl->bf_reg = reg_addr;
        txq_ctrl->uar_mmap_offset =
                        mlx5_os_get_devx_uar_mmap_offset(sh->tx_uar);
        txq_uar_init(txq_ctrl);
        dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
        return 0;
error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
        mlx5_txq_release_devx_resources(txq_obj);
        rte_errno = ret; /* Restore rte_errno. */
        return -rte_errno;
#endif
}
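/*
 * Worked example for the CQE sizing in the function above, assuming the
 * default MLX5_TX_COMP_THRESH of 32 and MLX5_TX_COMP_THRESH_INLINE_DIV of 8
 * (values defined in mlx5_defs.h): for elts_n = 10 (1024 elements),
 * cqe_n = 1024 / 32 + 1 + 8 = 41, rounded up to the next power of two, so
 * the CQ is created with 64 CQEs (log_desc_n = 6).
 */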
/**
 * Release a Tx DevX queue object.
 *
 * @param txq_obj
 *   DevX Tx queue object.
 */
void
mlx5_txq_devx_obj_release(struct mlx5_txq_obj *txq_obj)
{
        MLX5_ASSERT(txq_obj);
        if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
                if (txq_obj->tis)
                        claim_zero(mlx5_devx_cmd_destroy(txq_obj->tis));
#if defined(HAVE_MLX5DV_DEVX_UAR_OFFSET) || !defined(HAVE_INFINIBAND_VERBS_H)
        } else {
                mlx5_txq_release_devx_resources(txq_obj);
#endif
        }
}
struct mlx5_obj_ops devx_obj_ops = {
        .rxq_obj_modify_vlan_strip = mlx5_rxq_obj_modify_rq_vlan_strip,
        .rxq_obj_new = mlx5_rxq_devx_obj_new,
        .rxq_event_get = mlx5_rx_devx_get_event,
        .rxq_obj_modify = mlx5_devx_modify_rq,
        .rxq_obj_release = mlx5_rxq_devx_obj_release,
        .ind_table_new = mlx5_devx_ind_table_new,
        .ind_table_modify = mlx5_devx_ind_table_modify,
        .ind_table_destroy = mlx5_devx_ind_table_destroy,
        .hrxq_new = mlx5_devx_hrxq_new,
        .hrxq_destroy = mlx5_devx_tir_destroy,
        .hrxq_modify = mlx5_devx_hrxq_modify,
        .drop_action_create = mlx5_devx_drop_action_create,
        .drop_action_destroy = mlx5_devx_drop_action_destroy,
        .txq_obj_new = mlx5_txq_devx_obj_new,
        .txq_obj_modify = mlx5_devx_modify_sq,
        .txq_obj_release = mlx5_txq_devx_obj_release,
};