/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <stdint.h>
#include <strings.h>

#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_bus_pci.h>
#include <rte_regexdev_driver.h>
#include <rte_mbuf.h>

#include <infiniband/mlx5dv.h>
#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_prm.h>

#include "mlx5_regex_utils.h"
#include "mlx5_rxp.h"
#include "mlx5_regex.h"

#define MLX5_REGEX_MAX_WQE_INDEX 0xffff
#define MLX5_REGEX_METADATA_SIZE ((size_t)64)
#define MLX5_REGEX_MAX_OUTPUT (((size_t)1) << 11)
#define MLX5_REGEX_WQE_CTRL_OFFSET 12
#define MLX5_REGEX_WQE_METADATA_OFFSET 16
#define MLX5_REGEX_WQE_GATHER_OFFSET 32
#define MLX5_REGEX_WQE_SCATTER_OFFSET 48
#define MLX5_REGEX_METADATA_OFF 32
#define MLX5_REGEX_UMR_WQE_SIZE 192
/* The maximum number of KLMs that can be added to one UMR indirect mkey. */
#define MLX5_REGEX_MAX_KLM_NUM 128
/* The KLM array size for one job. */
#define MLX5_REGEX_KLMS_SIZE \
	((MLX5_REGEX_MAX_KLM_NUM) * sizeof(struct mlx5_klm))
/* In WQE set mode, the PI spans a quarter of MLX5_REGEX_MAX_WQE_INDEX. */
#define MLX5_REGEX_UMR_QP_PI_IDX(pi, ops) \
	(((pi) + (ops)) & (MLX5_REGEX_MAX_WQE_INDEX >> 2))
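
/*
 * Worked example of the WQE-set indexing above (illustration, not from
 * the original source): each WQE set occupies 4 WQEBBs, so the producer
 * index wraps in a quarter of the plain index space, i.e. 0x3fff.
 * With pi == 0x3ffe and ops == 3:
 *   (0x3ffe + 3) & 0x3fff == 0x0001.
 */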

static inline uint32_t
qp_size_get(struct mlx5_regex_hw_qp *qp)
{
	return (1U << qp->log_nb_desc);
}

static inline uint32_t
cq_size_get(struct mlx5_regex_cq *cq)
{
	return (1U << cq->log_nb_desc);
}

struct mlx5_regex_job {
	uint64_t user_id;
	volatile uint8_t *output;
	volatile uint8_t *metadata;
	struct mlx5_klm *imkey_array; /* Indirect mkey's KLM array. */
	struct mlx5_devx_obj *imkey; /* UMR WQE's indirect mkey. */
} __rte_cache_aligned;
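
/*
 * For reference: each job owns one 64-byte metadata slot
 * (MLX5_REGEX_METADATA_SIZE) and one 2-KB output slot
 * (MLX5_REGEX_MAX_OUTPUT), both carved out of per-queue registered
 * buffers in setup_buffers() below.
 */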

static inline void
set_data_seg(struct mlx5_wqe_data_seg *seg,
	     uint32_t length, uint32_t lkey,
	     uintptr_t address)
{
	seg->byte_count = rte_cpu_to_be_32(length);
	seg->lkey = rte_cpu_to_be_32(lkey);
	seg->addr = rte_cpu_to_be_64(address);
}

static inline void
set_metadata_seg(struct mlx5_wqe_metadata_seg *seg,
		 uint32_t mmo_control_31_0, uint32_t lkey,
		 uintptr_t address)
{
	seg->mmo_control_31_0 = htobe32(mmo_control_31_0);
	seg->lkey = rte_cpu_to_be_32(lkey);
	seg->addr = rte_cpu_to_be_64(address);
}

static inline void
set_regex_ctrl_seg(void *seg, uint8_t le, uint16_t subset_id0,
		   uint16_t subset_id1, uint16_t subset_id2,
		   uint16_t subset_id3, uint8_t ctrl)
{
	MLX5_SET(regexp_mmo_control, seg, le, le);
	MLX5_SET(regexp_mmo_control, seg, ctrl, ctrl);
	MLX5_SET(regexp_mmo_control, seg, subset_id_0, subset_id0);
	MLX5_SET(regexp_mmo_control, seg, subset_id_1, subset_id1);
	MLX5_SET(regexp_mmo_control, seg, subset_id_2, subset_id2);
	MLX5_SET(regexp_mmo_control, seg, subset_id_3, subset_id3);
}

static inline void
set_wqe_ctrl_seg(struct mlx5_wqe_ctrl_seg *seg, uint16_t pi, uint8_t opcode,
		 uint8_t opmod, uint32_t qp_num, uint8_t fm_ce_se, uint8_t ds,
		 uint8_t signature, uint32_t imm)
{
	seg->opmod_idx_opcode = rte_cpu_to_be_32(((uint32_t)opmod << 24) |
						 ((uint32_t)pi << 8) |
						 opcode);
	seg->qpn_ds = rte_cpu_to_be_32((qp_num << 8) | ds);
	seg->fm_ce_se = fm_ce_se;
	seg->signature = signature;
	seg->imm = imm;
}

static inline void
__prep_one(struct mlx5_regex_priv *priv, struct mlx5_regex_hw_qp *qp_obj,
	   struct rte_regex_ops *op, struct mlx5_regex_job *job,
	   size_t pi, struct mlx5_klm *klm)
{
	size_t wqe_offset = (pi & (qp_size_get(qp_obj) - 1)) *
			    (MLX5_SEND_WQE_BB << (priv->has_umr ? 2 : 0)) +
			    (priv->has_umr ? MLX5_REGEX_UMR_WQE_SIZE : 0);
	uint16_t group0 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID0_VALID_F ?
				op->group_id0 : 0;
	uint16_t group1 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F ?
				op->group_id1 : 0;
	uint16_t group2 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F ?
				op->group_id2 : 0;
	uint16_t group3 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F ?
				op->group_id3 : 0;
	uint8_t control = op->req_flags &
				RTE_REGEX_OPS_REQ_MATCH_HIGH_PRIORITY_F ? 1 : 0;

	/* For backward compatibility. */
	if (!(op->req_flags & (RTE_REGEX_OPS_REQ_GROUP_ID0_VALID_F |
			       RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F |
			       RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F |
			       RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F)))
		group0 = op->group_id0;
	uint8_t *wqe = (uint8_t *)(uintptr_t)qp_obj->qp_obj.wqes + wqe_offset;
	int ds = 4; /* ctrl + meta + input + output */

	set_wqe_ctrl_seg((struct mlx5_wqe_ctrl_seg *)wqe,
			 (priv->has_umr ? (pi * 4 + 3) : pi),
			 MLX5_OPCODE_MMO, MLX5_OPC_MOD_MMO_REGEX,
			 qp_obj->qp_obj.qp->id, 0, ds, 0, 0);
	set_regex_ctrl_seg(wqe + MLX5_REGEX_WQE_CTRL_OFFSET, 0, group0, group1,
			   group2, group3, control);
	struct mlx5_wqe_data_seg *input_seg =
		(struct mlx5_wqe_data_seg *)(wqe +
					     MLX5_REGEX_WQE_GATHER_OFFSET);
	input_seg->byte_count = rte_cpu_to_be_32(klm->byte_count);
	input_seg->addr = rte_cpu_to_be_64(klm->address);
	input_seg->lkey = klm->mkey;
	job->user_id = op->user_id;
}
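
/*
 * RegEx WQE layout built by __prep_one(), per the offsets defined above
 * (a summary of the code, added for orientation):
 *   byte  0: generic WQE control segment
 *   byte 12: RegEx MMO control (subset IDs, ctrl)
 *   byte 16: metadata segment
 *   byte 32: gather (input) data segment
 *   byte 48: scatter (output) data segment
 */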

static inline void
prep_one(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
	 struct mlx5_regex_hw_qp *qp_obj, struct rte_regex_ops *op,
	 struct mlx5_regex_job *job)
{
	struct mlx5_klm klm;

	klm.byte_count = rte_pktmbuf_data_len(op->mbuf);
	klm.mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, op->mbuf, 0);
	klm.address = rte_pktmbuf_mtod(op->mbuf, uintptr_t);
	__prep_one(priv, qp_obj, op, job, qp_obj->pi, &klm);
	qp_obj->db_pi = qp_obj->pi;
	qp_obj->pi = (qp_obj->pi + 1) & MLX5_REGEX_MAX_WQE_INDEX;
}

static inline void
send_doorbell(struct mlx5_regex_priv *priv, struct mlx5_regex_hw_qp *qp_obj)
{
	struct mlx5dv_devx_uar *uar = priv->uar;
	size_t wqe_offset = (qp_obj->db_pi & (qp_size_get(qp_obj) - 1)) *
		(MLX5_SEND_WQE_BB << (priv->has_umr ? 2 : 0)) +
		(priv->has_umr ? MLX5_REGEX_UMR_WQE_SIZE : 0);
	uint8_t *wqe = (uint8_t *)(uintptr_t)qp_obj->qp_obj.wqes + wqe_offset;
	/* OR into fm_ce_se rather than assign, so a set fence is not cleared. */
	((struct mlx5_wqe_ctrl_seg *)wqe)->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	uint64_t *doorbell_addr =
		(uint64_t *)((uint8_t *)uar->base_addr + 0x800);
	rte_io_wmb();
	qp_obj->qp_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32((priv->has_umr ?
					(qp_obj->db_pi * 4 + 3) : qp_obj->db_pi)
					& MLX5_REGEX_MAX_WQE_INDEX);
	rte_wmb();
	*doorbell_addr = *(volatile uint64_t *)wqe;
	rte_wmb();
}
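
/*
 * Doorbell sequence above, step by step (descriptive note): mark the
 * last posted WQE for CQ update, publish the new producer index in the
 * doorbell record, then write the first 8 bytes of that WQE's control
 * segment to the UAR doorbell register at offset 0x800; the barriers
 * keep the three stores ordered as observed by the device.
 */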

static inline int
get_free(struct mlx5_regex_hw_qp *qp, uint8_t has_umr) {
	return (qp_size_get(qp) - ((qp->pi - qp->ci) &
			(has_umr ? (MLX5_REGEX_MAX_WQE_INDEX >> 2) :
			MLX5_REGEX_MAX_WQE_INDEX)));
}
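
/*
 * Worked example (illustration): with log_nb_desc == 4 (16 descriptors),
 * pi == 5 and ci == 2 in non-UMR mode, get_free() returns
 * 16 - ((5 - 2) & 0xffff) == 13 free descriptors; the mask handles PI
 * wrap-around relative to CI.
 */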

static inline uint32_t
job_id_get(uint32_t qid, size_t qp_size, size_t index) {
	return qid * qp_size + (index & (qp_size - 1));
}

#ifdef HAVE_MLX5_UMR_IMKEY
static inline int
mkey_klm_available(struct mlx5_klm *klm, uint32_t pos, uint32_t new)
{
	return (klm && ((pos + new) <= MLX5_REGEX_MAX_KLM_NUM));
}

static void
complete_umr_wqe(struct mlx5_regex_qp *qp, struct mlx5_regex_hw_qp *qp_obj,
		 struct mlx5_regex_job *mkey_job,
		 size_t umr_index, uint32_t klm_size, uint32_t total_len)
{
	size_t wqe_offset = (umr_index & (qp_size_get(qp_obj) - 1)) *
		(MLX5_SEND_WQE_BB * 4);
	struct mlx5_wqe_ctrl_seg *wqe = (struct mlx5_wqe_ctrl_seg *)((uint8_t *)
				   (uintptr_t)qp_obj->qp_obj.wqes + wqe_offset);
	struct mlx5_wqe_umr_ctrl_seg *ucseg =
				(struct mlx5_wqe_umr_ctrl_seg *)(wqe + 1);
	struct mlx5_wqe_mkey_context_seg *mkc =
				(struct mlx5_wqe_mkey_context_seg *)(ucseg + 1);
	struct mlx5_klm *iklm = (struct mlx5_klm *)(mkc + 1);
	uint16_t klm_align = RTE_ALIGN(klm_size, 4);

	memset(wqe, 0, MLX5_REGEX_UMR_WQE_SIZE);
	/* Set WQE control seg. Non-inline KLM UMR WQE size must be 9 WQE_DS. */
	set_wqe_ctrl_seg(wqe, (umr_index * 4), MLX5_OPCODE_UMR,
			 0, qp_obj->qp_obj.qp->id, 0, 9, 0,
			 rte_cpu_to_be_32(mkey_job->imkey->id));
	/* Set UMR WQE control seg. */
	ucseg->mkey_mask |= rte_cpu_to_be_64(MLX5_WQE_UMR_CTRL_MKEY_MASK_LEN |
				MLX5_WQE_UMR_CTRL_FLAG_TRNSLATION_OFFSET |
				MLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_LOCAL_WRITE);
	ucseg->klm_octowords = rte_cpu_to_be_16(klm_align);
	/* Set mkey context seg. */
	mkc->len = rte_cpu_to_be_64(total_len);
	mkc->qpn_mkey = rte_cpu_to_be_32(0xffffff00 |
					(mkey_job->imkey->id & 0xff));
	/* Set UMR pointer to data seg. */
	iklm->address = rte_cpu_to_be_64
				((uintptr_t)((char *)mkey_job->imkey_array));
	iklm->mkey = rte_cpu_to_be_32(qp->imkey_addr->lkey);
	iklm->byte_count = rte_cpu_to_be_32(klm_align);
	/* Clear the padding memory. */
	memset((uint8_t *)&mkey_job->imkey_array[klm_size], 0,
	       sizeof(struct mlx5_klm) * (klm_align - klm_size));

	/* Add the following RegEx WQE with fence. */
	wqe = (struct mlx5_wqe_ctrl_seg *)
				(((uint8_t *)wqe) + MLX5_REGEX_UMR_WQE_SIZE);
	wqe->fm_ce_se |= MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE;
}
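
/*
 * Size note on the UMR WQE above (derived from the constants in this
 * file): ctrl (16 B) + UMR ctrl (48 B) + mkey context (64 B) + pointer
 * KLM (16 B) is 144 B, i.e. the 9 WQE_DS programmed in the control
 * segment; the slot is padded to MLX5_REGEX_UMR_WQE_SIZE (192 B,
 * 3 WQEBBs) so the RegEx WQE starts on the 4th WQEBB of the set.
 */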

static inline void
prep_nop_regex_wqe_set(struct mlx5_regex_priv *priv,
		struct mlx5_regex_hw_qp *qp, struct rte_regex_ops *op,
		struct mlx5_regex_job *job, size_t pi, struct mlx5_klm *klm)
{
	size_t wqe_offset = (pi & (qp_size_get(qp) - 1)) *
			    (MLX5_SEND_WQE_BB << 2);
	struct mlx5_wqe_ctrl_seg *wqe = (struct mlx5_wqe_ctrl_seg *)((uint8_t *)
				   (uintptr_t)qp->qp_obj.wqes + wqe_offset);

	/* Clear the WQE memory used as UMR WQE previously. */
	if ((rte_be_to_cpu_32(wqe->opmod_idx_opcode) & 0xff) != MLX5_OPCODE_NOP)
		memset(wqe, 0, MLX5_REGEX_UMR_WQE_SIZE);
	/* UMR WQE size is 9 DS, align the NOP WQE to 3 WQEBBs (12 DS). */
	set_wqe_ctrl_seg(wqe, pi * 4, MLX5_OPCODE_NOP, 0, qp->qp_obj.qp->id,
			 0, 12, 0, 0);
	__prep_one(priv, qp, op, job, pi, klm);
}

static inline void
prep_regex_umr_wqe_set(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
	struct mlx5_regex_hw_qp *qp_obj, struct rte_regex_ops **op,
	size_t nb_ops)
{
	struct mlx5_regex_job *job = NULL;
	size_t hw_qpid = qp_obj->qpn, mkey_job_id = 0;
	size_t left_ops = nb_ops;
	uint32_t klm_num = 0;
	uint32_t len = 0;
	struct mlx5_klm *mkey_klm = NULL;
	struct mlx5_klm klm;
	uintptr_t addr;

	while (left_ops--)
		rte_prefetch0(op[left_ops]);
	left_ops = nb_ops;
	/*
	 * Build the WQE set in reverse. If the burst consumes multiple
	 * mkeys, building in order makes it hard to address the last
	 * mkey's index, since the last RegEx WQE's index is only known
	 * once the build finishes.
	 */
	while (left_ops--) {
		struct rte_mbuf *mbuf = op[left_ops]->mbuf;
		size_t pi = MLX5_REGEX_UMR_QP_PI_IDX(qp_obj->pi, left_ops);

		if (mbuf->nb_segs > 1) {
			size_t scatter_size = 0;

			if (!mkey_klm_available(mkey_klm, klm_num,
						mbuf->nb_segs)) {
				/*
				 * The mkey's KLM is full, create the UMR
				 * WQE in the next WQE set.
				 */
				if (mkey_klm)
					complete_umr_wqe(qp, qp_obj,
						&qp->jobs[mkey_job_id],
						MLX5_REGEX_UMR_QP_PI_IDX(pi, 1),
						klm_num, len);
				/*
				 * Get the indirect mkey and KLM array index
				 * from the last WQE set.
				 */
				mkey_job_id = job_id_get(hw_qpid,
						qp_size_get(qp_obj), pi);
				mkey_klm = qp->jobs[mkey_job_id].imkey_array;
				klm_num = 0;
				len = 0;
			}
			/* Build RegEx WQE's data segment KLM. */
			klm.address = len;
			klm.mkey = rte_cpu_to_be_32
					(qp->jobs[mkey_job_id].imkey->id);
			while (mbuf) {
				addr = rte_pktmbuf_mtod(mbuf, uintptr_t);
				/* Build indirect mkey seg's KLM. */
				mkey_klm->mkey = mlx5_mr_mb2mr(&qp->mr_ctrl,
							       mbuf, 0);
				mkey_klm->address = rte_cpu_to_be_64(addr);
				mkey_klm->byte_count = rte_cpu_to_be_32
						(rte_pktmbuf_data_len(mbuf));
				/*
				 * Save the mbuf's total size for RegEx data
				 * input length.
				 */
				scatter_size += rte_pktmbuf_data_len(mbuf);
				mkey_klm++;
				klm_num++;
				mbuf = mbuf->next;
			}
			len += scatter_size;
			klm.byte_count = scatter_size;
		} else {
			/* The single mbuf case. Build the KLM directly. */
			klm.mkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf, 0);
			klm.address = rte_pktmbuf_mtod(mbuf, uintptr_t);
			klm.byte_count = rte_pktmbuf_data_len(mbuf);
		}
		job = &qp->jobs[job_id_get(hw_qpid, qp_size_get(qp_obj), pi)];
		/*
		 * Build the NOP + RegEx WQE set by default. The first NOP
		 * WQE will be updated later to a UMR WQE if scattered mbufs
		 * exist.
		 */
		prep_nop_regex_wqe_set(priv, qp_obj, op[left_ops], job, pi,
					&klm);
	}
	/*
	 * Scattered mbufs have been added to the KLM array. Complete the
	 * UMR WQE build, updating the first NOP WQE into a UMR WQE.
	 */
	if (mkey_klm)
		complete_umr_wqe(qp, qp_obj, &qp->jobs[mkey_job_id], qp_obj->pi,
				 klm_num, len);
	qp_obj->db_pi = MLX5_REGEX_UMR_QP_PI_IDX(qp_obj->pi, nb_ops - 1);
	qp_obj->pi = MLX5_REGEX_UMR_QP_PI_IDX(qp_obj->pi, nb_ops);
}
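
/*
 * Worked example of the reverse build above (illustration): for a burst
 * of three ops where only op[1] is multi-segment, the loop visits
 * op[2], op[1], op[0]. Visiting op[1] claims the indirect mkey of the
 * WQE set at its PI and fills one KLM per segment; the final
 * complete_umr_wqe() then rewrites the leading NOP of the burst's first
 * WQE set (qp_obj->pi) into a UMR WQE pointing at that KLM array, so
 * the UMR executes before every RegEx WQE that references the mkey.
 */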

uint16_t
mlx5_regexdev_enqueue_gga(struct rte_regexdev *dev, uint16_t qp_id,
			  struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *queue = &priv->qps[qp_id];
	struct mlx5_regex_hw_qp *qp_obj;
	size_t hw_qpid, nb_left = nb_ops, nb_desc;

	while ((hw_qpid = ffs(queue->free_qps))) {
		hw_qpid--; /* ffs returns 1 for bit 0 */
		qp_obj = &queue->qps[hw_qpid];
		nb_desc = get_free(qp_obj, priv->has_umr);
		if (nb_desc) {
			/* The ops handled cannot exceed nb_ops. */
			if (nb_desc > nb_left)
				nb_desc = nb_left;
			else
				queue->free_qps &= ~(1 << hw_qpid);
			prep_regex_umr_wqe_set(priv, queue, qp_obj, ops,
				nb_desc);
			send_doorbell(priv, qp_obj);
			nb_left -= nb_desc;
		}
		if (!nb_left)
			break;
		ops += nb_desc;
	}
	nb_ops -= nb_left;
	queue->pi += nb_ops;
	return nb_ops;
}
#endif

uint16_t
mlx5_regexdev_enqueue(struct rte_regexdev *dev, uint16_t qp_id,
		      struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *queue = &priv->qps[qp_id];
	struct mlx5_regex_hw_qp *qp_obj;
	size_t hw_qpid, job_id, i = 0;

	while ((hw_qpid = ffs(queue->free_qps))) {
		hw_qpid--; /* ffs returns 1 for bit 0 */
		qp_obj = &queue->qps[hw_qpid];
		while (get_free(qp_obj, priv->has_umr)) {
			job_id = job_id_get(hw_qpid, qp_size_get(qp_obj),
				qp_obj->pi);
			prep_one(priv, queue, qp_obj, ops[i],
				&queue->jobs[job_id]);
			i++;
			if (unlikely(i == nb_ops)) {
				send_doorbell(priv, qp_obj);
				goto out;
			}
		}
		queue->free_qps &= ~(1 << hw_qpid);
		send_doorbell(priv, qp_obj);
	}

out:
	queue->pi += i;
	return i;
}
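
/*
 * Note (added for orientation): mlx5_regexdev_enqueue() is the non-UMR
 * path, one RegEx WQE per op with a single contiguous mbuf;
 * mlx5_regexdev_enqueue_gga() above is its UMR counterpart, batching
 * WQE sets so multi-segment mbufs can be handled via indirect mkeys.
 */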

#define MLX5_REGEX_RESP_SZ 8

static inline void
extract_result(struct rte_regex_ops *op, struct mlx5_regex_job *job)
{
	size_t j;
	size_t offset;
	uint16_t status;

	op->user_id = job->user_id;
	op->nb_matches = MLX5_GET_VOLATILE(regexp_metadata, job->metadata +
					   MLX5_REGEX_METADATA_OFF,
					   match_count);
	op->nb_actual_matches = MLX5_GET_VOLATILE(regexp_metadata,
					job->metadata +
					MLX5_REGEX_METADATA_OFF,
					detected_match_count);
	for (j = 0; j < op->nb_matches; j++) {
		offset = MLX5_REGEX_RESP_SZ * j;
		op->matches[j].rule_id =
			MLX5_GET_VOLATILE(regexp_match_tuple,
					  (job->output + offset), rule_id);
		op->matches[j].start_offset =
			MLX5_GET_VOLATILE(regexp_match_tuple,
					  (job->output + offset), start_ptr);
		op->matches[j].len =
			MLX5_GET_VOLATILE(regexp_match_tuple,
					  (job->output + offset), length);
	}
	status = MLX5_GET_VOLATILE(regexp_metadata, job->metadata +
				   MLX5_REGEX_METADATA_OFF,
				   status);
	op->rsp_flags = 0;
	if (status & MLX5_RXP_RESP_STATUS_PMI_SOJ)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_SOJ_F;
	if (status & MLX5_RXP_RESP_STATUS_PMI_EOJ)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_EOJ_F;
	if (status & MLX5_RXP_RESP_STATUS_MAX_LATENCY)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_SCAN_TIMEOUT_F;
	if (status & MLX5_RXP_RESP_STATUS_MAX_MATCH)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_MATCH_F;
	if (status & MLX5_RXP_RESP_STATUS_MAX_PREFIX)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_PREFIX_F;
	if (status & MLX5_RXP_RESP_STATUS_MAX_PRI_THREADS)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_RESOURCE_LIMIT_REACHED_F;
	if (status & MLX5_RXP_RESP_STATUS_MAX_SEC_THREADS)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_RESOURCE_LIMIT_REACHED_F;
}
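
/*
 * Result layout consumed above (a summary of the code): the engine
 * writes job metadata at byte 32 of the metadata slot
 * (MLX5_REGEX_METADATA_OFF) and one 8-byte match tuple per match
 * (MLX5_REGEX_RESP_SZ) at the start of the output slot, each tuple
 * carrying rule_id, start_ptr and length.
 */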

static inline volatile struct mlx5_cqe *
poll_one(struct mlx5_regex_cq *cq)
{
	volatile struct mlx5_cqe *cqe;
	size_t next_cqe_offset;

	next_cqe_offset = (cq->ci & (cq_size_get(cq) - 1));
	cqe = (volatile struct mlx5_cqe *)(cq->cq_obj.cqes + next_cqe_offset);
	rte_io_wmb();

	int ret = check_cqe(cqe, cq_size_get(cq), cq->ci);

	if (unlikely(ret == MLX5_CQE_STATUS_ERR)) {
		DRV_LOG(ERR, "Completion with error on qp 0x%x", 0);
		return NULL;
	}

	if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN))
		return NULL;

	return cqe;
}

/**
 * DPDK callback for dequeue.
 *
 * @param dev
 *   Pointer to the regex dev structure.
 * @param qp_id
 *   The queue to enqueue the traffic to.
 * @param ops
 *   List of regex ops to dequeue.
 * @param nb_ops
 *   Number of ops in ops parameter.
 *
 * @return
 *   Number of packets successfully dequeued (<= nb_ops).
 */
uint16_t
mlx5_regexdev_dequeue(struct rte_regexdev *dev, uint16_t qp_id,
		      struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *queue = &priv->qps[qp_id];
	struct mlx5_regex_cq *cq = &queue->cq;
	volatile struct mlx5_cqe *cqe;
	size_t i = 0;

	while ((cqe = poll_one(cq))) {
		uint16_t wq_counter
			= (rte_be_to_cpu_16(cqe->wqe_counter) + 1) &
			  MLX5_REGEX_MAX_WQE_INDEX;
		size_t hw_qpid = cqe->user_index_bytes[2];
		struct mlx5_regex_hw_qp *qp_obj = &queue->qps[hw_qpid];

		/* In UMR mode the WQE counter moves per WQE set (4 WQEBBs). */
		if (priv->has_umr)
			wq_counter >>= 2;
		while (qp_obj->ci != wq_counter) {
			if (unlikely(i == nb_ops)) {
				/* Return without updating cq->ci. */
				goto out;
			}
			uint32_t job_id = job_id_get(hw_qpid,
					qp_size_get(qp_obj), qp_obj->ci);
			extract_result(ops[i], &queue->jobs[job_id]);
			qp_obj->ci = (qp_obj->ci + 1) & (priv->has_umr ?
				 (MLX5_REGEX_MAX_WQE_INDEX >> 2) :
				  MLX5_REGEX_MAX_WQE_INDEX);
			i++;
		}
		cq->ci = (cq->ci + 1) & 0xffffff;
		rte_wmb();
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->ci);
		queue->free_qps |= (1 << hw_qpid);
	}

out:
	queue->ci += i;
	return i;
}
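
/*
 * Note (added for orientation): one CQE may complete several jobs, so
 * the inner loop above drains every descriptor up to the reported WQE
 * counter before the CQ consumer index is advanced (24-bit, hence the
 * 0xffffff mask).
 */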

static void
setup_qps(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *queue)
{
	size_t hw_qpid, entry;
	uint32_t job_id;

	for (hw_qpid = 0; hw_qpid < queue->nb_obj; hw_qpid++) {
		struct mlx5_regex_hw_qp *qp_obj = &queue->qps[hw_qpid];
		uint8_t *wqe = (uint8_t *)(uintptr_t)qp_obj->qp_obj.wqes;

		for (entry = 0; entry < qp_size_get(qp_obj); entry++) {
			job_id = hw_qpid * qp_size_get(qp_obj) + entry;
			struct mlx5_regex_job *job = &queue->jobs[job_id];

			/* Fill the UMR WQE slot with a NOP in advance. */
			if (priv->has_umr) {
				set_wqe_ctrl_seg
					((struct mlx5_wqe_ctrl_seg *)wqe,
					 entry * 2, MLX5_OPCODE_NOP, 0,
					 qp_obj->qp_obj.qp->id, 0, 12, 0, 0);
				wqe += MLX5_REGEX_UMR_WQE_SIZE;
			}
			set_metadata_seg((struct mlx5_wqe_metadata_seg *)
					 (wqe + MLX5_REGEX_WQE_METADATA_OFFSET),
					 0, queue->metadata->lkey,
					 (uintptr_t)job->metadata);
			set_data_seg((struct mlx5_wqe_data_seg *)
				     (wqe + MLX5_REGEX_WQE_SCATTER_OFFSET),
				     MLX5_REGEX_MAX_OUTPUT,
				     queue->outputs->lkey,
				     (uintptr_t)job->output);
			wqe += 64;
		}
		queue->free_qps |= 1 << hw_qpid;
	}
}
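
/*
 * Ring layout produced above (derived from the constants in this file):
 * in UMR mode each descriptor occupies 4 WQEBBs (256 B), a 192-byte
 * NOP/UMR slot followed by a 64-byte RegEx WQE, which is why WQE
 * offsets elsewhere shift MLX5_SEND_WQE_BB left by 2 and add
 * MLX5_REGEX_UMR_WQE_SIZE; without UMR each descriptor is a single
 * 64-byte RegEx WQE.
 */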

static int
setup_buffers(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp)
{
	struct ibv_pd *pd = priv->cdev->pd;
	uint32_t i;
	int err;

	void *ptr = rte_calloc(__func__, qp->nb_desc,
			       MLX5_REGEX_METADATA_SIZE,
			       MLX5_REGEX_METADATA_SIZE);
	if (!ptr)
		return -ENOMEM;

	qp->metadata = mlx5_glue->reg_mr(pd, ptr,
					 MLX5_REGEX_METADATA_SIZE * qp->nb_desc,
					 IBV_ACCESS_LOCAL_WRITE);
	if (!qp->metadata) {
		DRV_LOG(ERR, "Failed to register metadata");
		rte_free(ptr);
		return -EINVAL;
	}

	ptr = rte_calloc(__func__, qp->nb_desc,
			 MLX5_REGEX_MAX_OUTPUT,
			 MLX5_REGEX_MAX_OUTPUT);
	if (!ptr) {
		err = -ENOMEM;
		goto err_output;
	}
	qp->outputs = mlx5_glue->reg_mr(pd, ptr,
					MLX5_REGEX_MAX_OUTPUT * qp->nb_desc,
					IBV_ACCESS_LOCAL_WRITE);
	if (!qp->outputs) {
		rte_free(ptr);
		DRV_LOG(ERR, "Failed to register output");
		err = -EINVAL;
		goto err_output;
	}

	if (priv->has_umr) {
		ptr = rte_calloc(__func__, qp->nb_desc, MLX5_REGEX_KLMS_SIZE,
				 MLX5_REGEX_KLMS_SIZE);
		if (!ptr) {
			err = -ENOMEM;
			goto err_imkey;
		}
		qp->imkey_addr = mlx5_glue->reg_mr(pd, ptr,
					MLX5_REGEX_KLMS_SIZE * qp->nb_desc,
					IBV_ACCESS_LOCAL_WRITE);
		if (!qp->imkey_addr) {
			rte_free(ptr);
			DRV_LOG(ERR, "Failed to register KLM buffer");
			err = -EINVAL;
			goto err_imkey;
		}
	}

	/* Distribute buffers to jobs. */
	for (i = 0; i < qp->nb_desc; i++) {
		qp->jobs[i].output =
			(uint8_t *)qp->outputs->addr +
			(i % qp->nb_desc) * MLX5_REGEX_MAX_OUTPUT;
		qp->jobs[i].metadata =
			(uint8_t *)qp->metadata->addr +
			(i % qp->nb_desc) * MLX5_REGEX_METADATA_SIZE;
		if (qp->imkey_addr)
			qp->jobs[i].imkey_array = (struct mlx5_klm *)
				qp->imkey_addr->addr +
				(i % qp->nb_desc) * MLX5_REGEX_MAX_KLM_NUM;
	}

	return 0;

err_imkey:
	/* Deregister the MR before freeing the memory it covers. */
	ptr = qp->outputs->addr;
	mlx5_glue->dereg_mr(qp->outputs);
	rte_free(ptr);
err_output:
	ptr = qp->metadata->addr;
	mlx5_glue->dereg_mr(qp->metadata);
	rte_free(ptr);
	return err;
}

int
mlx5_regexdev_setup_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id)
{
	struct mlx5_regex_qp *qp = &priv->qps[qp_id];
	struct mlx5_klm klm = { 0 };
	struct mlx5_devx_mkey_attr attr = {
		.klm_num = 1,
		.klm_array = &klm,
		.umr_en = 1,
	};
	uint32_t i;
	int err = 0;

	qp->jobs = rte_calloc(__func__, qp->nb_desc, sizeof(*qp->jobs), 64);
	if (!qp->jobs)
		return -ENOMEM;
	err = setup_buffers(priv, qp);
	if (err) {
		rte_free(qp->jobs);
		qp->jobs = NULL;
		return err;
	}

	setup_qps(priv, qp);

	if (priv->has_umr) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		attr.pd = priv->cdev->pdn;
#endif
		for (i = 0; i < qp->nb_desc; i++) {
			attr.klm_num = MLX5_REGEX_MAX_KLM_NUM;
			attr.klm_array = qp->jobs[i].imkey_array;
			qp->jobs[i].imkey = mlx5_devx_cmd_mkey_create
						(priv->cdev->ctx, &attr);
			if (!qp->jobs[i].imkey) {
				err = -rte_errno;
				DRV_LOG(ERR, "Failed to allocate imkey.");
				mlx5_regexdev_teardown_fastpath(priv, qp_id);
			}
		}
	}
	return err;
}

static void
free_buffers(struct mlx5_regex_qp *qp)
{
	void *ptr; /* Saved so each MR is deregistered before the free. */

	if (qp->imkey_addr) {
		ptr = qp->imkey_addr->addr;
		mlx5_glue->dereg_mr(qp->imkey_addr);
		rte_free(ptr);
	}
	if (qp->metadata) {
		ptr = qp->metadata->addr;
		mlx5_glue->dereg_mr(qp->metadata);
		rte_free(ptr);
	}
	if (qp->outputs) {
		ptr = qp->outputs->addr;
		mlx5_glue->dereg_mr(qp->outputs);
		rte_free(ptr);
	}
}

void
mlx5_regexdev_teardown_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id)
{
	struct mlx5_regex_qp *qp = &priv->qps[qp_id];
	uint32_t i;

	if (qp->jobs) {
		for (i = 0; i < qp->nb_desc; i++) {
			if (qp->jobs[i].imkey)
				claim_zero(mlx5_devx_cmd_destroy
							(qp->jobs[i].imkey));
		}
		free_buffers(qp);
		rte_free(qp->jobs);
		qp->jobs = NULL;
	}
}