/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2020 Mellanox Technologies, Ltd
 */

#include <unistd.h>
#include <strings.h>
#include <stdint.h>
#include <sys/mman.h>

#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_errno.h>
#include <rte_bus_pci.h>
#include <rte_pci.h>
#include <rte_regexdev_driver.h>
#include <rte_mbuf.h>

#include <infiniband/mlx5dv.h>
#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_prm.h>

#include "mlx5_regex_utils.h"
#include "mlx5_rxp.h"
#include "mlx5_regex.h"

#define MLX5_REGEX_MAX_WQE_INDEX 0xffff
#define MLX5_REGEX_METADATA_SIZE ((size_t)64)
#define MLX5_REGEX_MAX_OUTPUT (((size_t)1) << 11)
#define MLX5_REGEX_WQE_CTRL_OFFSET 12
#define MLX5_REGEX_WQE_METADATA_OFFSET 16
#define MLX5_REGEX_WQE_GATHER_OFFSET 32
#define MLX5_REGEX_WQE_SCATTER_OFFSET 48
#define MLX5_REGEX_METADATA_OFF 32
#define MLX5_REGEX_UMR_WQE_SIZE 192
/* The maximum number of KLMs that can be added to one UMR indirect mkey. */
#define MLX5_REGEX_MAX_KLM_NUM 128
/* The KLM array size for one job. */
#define MLX5_REGEX_KLMS_SIZE \
	((MLX5_REGEX_MAX_KLM_NUM) * sizeof(struct mlx5_klm))
/* In WQE set mode, the pi wraps at one quarter of MLX5_REGEX_MAX_WQE_INDEX. */
#define MLX5_REGEX_UMR_SQ_PI_IDX(pi, ops) \
	(((pi) + (ops)) & (MLX5_REGEX_MAX_WQE_INDEX >> 2))
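
/*
 * Worked example (illustrative only, not used by the code): with
 * MLX5_REGEX_MAX_WQE_INDEX 0xffff the WQE-set index mask is 0x3fff, so
 * MLX5_REGEX_UMR_SQ_PI_IDX(0x3ffe, 3) == (0x4001 & 0x3fff) == 0x0001.
 */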

static inline uint32_t
sq_size_get(struct mlx5_regex_sq *sq)
{
	return (1U << sq->log_nb_desc);
}

static inline uint32_t
cq_size_get(struct mlx5_regex_cq *cq)
{
	return (1U << cq->log_nb_desc);
}
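
/*
 * Both sizes are powers of two (1 << log_nb_desc), which keeps the
 * "index & (size - 1)" wrap-around masking used throughout this file cheap.
 */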

struct mlx5_regex_job {
	uint64_t user_id;
	volatile uint8_t *output;
	volatile uint8_t *metadata;
	struct mlx5_klm *imkey_array; /* Indirect mkey's KLM array. */
	struct mlx5_devx_obj *imkey; /* UMR WQE's indirect mkey. */
} __rte_cache_aligned;

static inline void
set_data_seg(struct mlx5_wqe_data_seg *seg,
	     uint32_t length, uint32_t lkey,
	     uintptr_t address)
{
	seg->byte_count = rte_cpu_to_be_32(length);
	seg->lkey = rte_cpu_to_be_32(lkey);
	seg->addr = rte_cpu_to_be_64(address);
}

static inline void
set_metadata_seg(struct mlx5_wqe_metadata_seg *seg,
		 uint32_t mmo_control_31_0, uint32_t lkey,
		 uintptr_t address)
{
	seg->mmo_control_31_0 = htobe32(mmo_control_31_0);
	seg->lkey = rte_cpu_to_be_32(lkey);
	seg->addr = rte_cpu_to_be_64(address);
}

static inline void
set_regex_ctrl_seg(void *seg, uint8_t le, uint16_t subset_id0,
		   uint16_t subset_id1, uint16_t subset_id2,
		   uint16_t subset_id3, uint8_t ctrl)
{
	MLX5_SET(regexp_mmo_control, seg, le, le);
	MLX5_SET(regexp_mmo_control, seg, ctrl, ctrl);
	MLX5_SET(regexp_mmo_control, seg, subset_id_0, subset_id0);
	MLX5_SET(regexp_mmo_control, seg, subset_id_1, subset_id1);
	MLX5_SET(regexp_mmo_control, seg, subset_id_2, subset_id2);
	MLX5_SET(regexp_mmo_control, seg, subset_id_3, subset_id3);
}

static inline void
set_wqe_ctrl_seg(struct mlx5_wqe_ctrl_seg *seg, uint16_t pi, uint8_t opcode,
		 uint8_t opmod, uint32_t qp_num, uint8_t fm_ce_se, uint8_t ds,
		 uint8_t signature, uint32_t imm)
{
	seg->opmod_idx_opcode = rte_cpu_to_be_32(((uint32_t)opmod << 24) |
						 ((uint32_t)pi << 8) |
						 opcode);
	seg->qpn_ds = rte_cpu_to_be_32((qp_num << 8) | ds);
	seg->fm_ce_se = fm_ce_se;
	seg->signature = signature;
	seg->imm = imm;
}

/**
 * Query LKey from a packet buffer for QP. If not found, add the mempool.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param mbuf
 *   Pointer to source mbuf, to search in.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static inline uint32_t
mlx5_regex_addr2mr(struct mlx5_regex_priv *priv, struct mlx5_mr_ctrl *mr_ctrl,
		   struct rte_mbuf *mbuf)
{
	uintptr_t addr = rte_pktmbuf_mtod(mbuf, uintptr_t);
	uint32_t lkey;

	/* Check generation bit to see if there's any change on existing MRs. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half on miss. */
	return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
				  !!(mbuf->ol_flags & EXT_ATTACHED_MBUF));
}

static inline void
__prep_one(struct mlx5_regex_priv *priv, struct mlx5_regex_sq *sq,
	   struct rte_regex_ops *op, struct mlx5_regex_job *job,
	   size_t pi, struct mlx5_klm *klm)
{
	size_t wqe_offset = (pi & (sq_size_get(sq) - 1)) *
			    (MLX5_SEND_WQE_BB << (priv->has_umr ? 2 : 0)) +
			    (priv->has_umr ? MLX5_REGEX_UMR_WQE_SIZE : 0);
	uint16_t group0 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID0_VALID_F ?
				op->group_id0 : 0;
	uint16_t group1 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F ?
				op->group_id1 : 0;
	uint16_t group2 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F ?
				op->group_id2 : 0;
	uint16_t group3 = op->req_flags & RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F ?
				op->group_id3 : 0;
	uint8_t control = op->req_flags &
			  RTE_REGEX_OPS_REQ_MATCH_HIGH_PRIORITY_F ? 1 : 0;

	/* For backward compatibility. */
	if (!(op->req_flags & (RTE_REGEX_OPS_REQ_GROUP_ID0_VALID_F |
			       RTE_REGEX_OPS_REQ_GROUP_ID1_VALID_F |
			       RTE_REGEX_OPS_REQ_GROUP_ID2_VALID_F |
			       RTE_REGEX_OPS_REQ_GROUP_ID3_VALID_F)))
		group0 = op->group_id0;
	uint8_t *wqe = (uint8_t *)(uintptr_t)sq->sq_obj.wqes + wqe_offset;
	int ds = 4; /* ctrl + meta + input + output */

	set_wqe_ctrl_seg((struct mlx5_wqe_ctrl_seg *)wqe,
			 (priv->has_umr ? (pi * 4 + 3) : pi),
			 MLX5_OPCODE_MMO, MLX5_OPC_MOD_MMO_REGEX,
			 sq->sq_obj.sq->id, 0, ds, 0, 0);
	set_regex_ctrl_seg(wqe + MLX5_REGEX_WQE_CTRL_OFFSET, 0, group0, group1,
			   group2, group3, control);
	struct mlx5_wqe_data_seg *input_seg =
		(struct mlx5_wqe_data_seg *)(wqe +
					     MLX5_REGEX_WQE_GATHER_OFFSET);
	input_seg->byte_count = rte_cpu_to_be_32(klm->byte_count);
	input_seg->addr = rte_cpu_to_be_64(klm->address);
	input_seg->lkey = klm->mkey;
	job->user_id = op->user_id;
}
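
/*
 * Reader's note on the resulting 64B RegEx WQE layout (inferred from the
 * offset macros above, not normative PRM text):
 *   bytes  0..15 - generic control segment,
 *   byte  12     - start of the RegEx MMO control (le/ctrl/subset ids),
 *   bytes 16..31 - metadata segment (pre-set in setup_sqs()),
 *   bytes 32..47 - gather (input) data segment (written here),
 *   bytes 48..63 - scatter (output) data segment (pre-set in setup_sqs()).
 */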

static inline void
prep_one(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
	 struct mlx5_regex_sq *sq, struct rte_regex_ops *op,
	 struct mlx5_regex_job *job)
{
	struct mlx5_klm klm;

	klm.byte_count = rte_pktmbuf_data_len(op->mbuf);
	klm.mkey = mlx5_regex_addr2mr(priv, &qp->mr_ctrl, op->mbuf);
	klm.address = rte_pktmbuf_mtod(op->mbuf, uintptr_t);
	__prep_one(priv, sq, op, job, sq->pi, &klm);
	sq->db_pi = sq->pi;
	sq->pi = (sq->pi + 1) & MLX5_REGEX_MAX_WQE_INDEX;
}

static inline void
send_doorbell(struct mlx5_regex_priv *priv, struct mlx5_regex_sq *sq)
{
	struct mlx5dv_devx_uar *uar = priv->uar;
	size_t wqe_offset = (sq->db_pi & (sq_size_get(sq) - 1)) *
			    (MLX5_SEND_WQE_BB << (priv->has_umr ? 2 : 0)) +
			    (priv->has_umr ? MLX5_REGEX_UMR_WQE_SIZE : 0);
	uint8_t *wqe = (uint8_t *)(uintptr_t)sq->sq_obj.wqes + wqe_offset;
	/* OR the fm_ce_se rather than assigning it, to avoid clearing a fence
	 * that is already set.
	 */
	((struct mlx5_wqe_ctrl_seg *)wqe)->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
	uint64_t *doorbell_addr =
		(uint64_t *)((uint8_t *)uar->base_addr + 0x800);
	rte_io_wmb();
	sq->sq_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32((priv->has_umr ?
					(sq->db_pi * 4 + 3) : sq->db_pi) &
					MLX5_REGEX_MAX_WQE_INDEX);
	rte_wmb();
	*doorbell_addr = *(volatile uint64_t *)wqe;
	rte_wmb();
}
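
/*
 * The sequence above is the usual mlx5 doorbell protocol: publish the
 * latest WQE index in the doorbell record, fence, then write the first
 * 8 bytes of the control segment to the UAR page (register offset 0x800)
 * to ring the hardware doorbell.
 */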

static inline uint32_t
get_free(struct mlx5_regex_sq *sq, uint8_t has_umr) {
	return (sq_size_get(sq) - ((sq->pi - sq->ci) &
			(has_umr ? (MLX5_REGEX_MAX_WQE_INDEX >> 2) :
			MLX5_REGEX_MAX_WQE_INDEX)));
}

static inline uint32_t
job_id_get(uint32_t qid, size_t sq_size, size_t index) {
	return qid * sq_size + (index & (sq_size - 1));
}
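
/*
 * Illustrative only: jobs of all SQs in a queue live in one flat array,
 * e.g. with sq_size = 256, job_id_get(1, 256, 258) == 256 + 2 == 258.
 */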

#ifdef HAVE_MLX5_UMR_IMKEY
static inline int
mkey_klm_available(struct mlx5_klm *klm, uint32_t pos, uint32_t new)
{
	return (klm && ((pos + new) <= MLX5_REGEX_MAX_KLM_NUM));
}

static void
complete_umr_wqe(struct mlx5_regex_qp *qp, struct mlx5_regex_sq *sq,
		 struct mlx5_regex_job *mkey_job,
		 size_t umr_index, uint32_t klm_size, uint32_t total_len)
{
	size_t wqe_offset = (umr_index & (sq_size_get(sq) - 1)) *
			    (MLX5_SEND_WQE_BB * 4);
	struct mlx5_wqe_ctrl_seg *wqe = (struct mlx5_wqe_ctrl_seg *)((uint8_t *)
				(uintptr_t)sq->sq_obj.wqes + wqe_offset);
	struct mlx5_wqe_umr_ctrl_seg *ucseg =
				(struct mlx5_wqe_umr_ctrl_seg *)(wqe + 1);
	struct mlx5_wqe_mkey_context_seg *mkc =
				(struct mlx5_wqe_mkey_context_seg *)(ucseg + 1);
	struct mlx5_klm *iklm = (struct mlx5_klm *)(mkc + 1);
	uint16_t klm_align = RTE_ALIGN(klm_size, 4);

	memset(wqe, 0, MLX5_REGEX_UMR_WQE_SIZE);
	/* Set WQE control seg. Non-inline KLM UMR WQE size must be 9 WQE_DS. */
	set_wqe_ctrl_seg(wqe, (umr_index * 4), MLX5_OPCODE_UMR,
			 0, sq->sq_obj.sq->id, 0, 9, 0,
			 rte_cpu_to_be_32(mkey_job->imkey->id));
	/* Set UMR WQE control seg. */
	ucseg->mkey_mask |= rte_cpu_to_be_64(MLX5_WQE_UMR_CTRL_MKEY_MASK_LEN |
				MLX5_WQE_UMR_CTRL_FLAG_TRNSLATION_OFFSET |
				MLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_LOCAL_WRITE);
	ucseg->klm_octowords = rte_cpu_to_be_16(klm_align);
	/* Set mkey context seg. */
	mkc->len = rte_cpu_to_be_64(total_len);
	mkc->qpn_mkey = rte_cpu_to_be_32(0xffffff00 |
					 (mkey_job->imkey->id & 0xff));
	/* Set UMR pointer to data seg. */
	iklm->address = rte_cpu_to_be_64
				((uintptr_t)((char *)mkey_job->imkey_array));
	iklm->mkey = rte_cpu_to_be_32(qp->imkey_addr->lkey);
	iklm->byte_count = rte_cpu_to_be_32(klm_align);
	/* Clear the padding memory. */
	memset((uint8_t *)&mkey_job->imkey_array[klm_size], 0,
	       sizeof(struct mlx5_klm) * (klm_align - klm_size));

	/* Add the following RegEx WQE with fence. */
	wqe = (struct mlx5_wqe_ctrl_seg *)
				(((uint8_t *)wqe) + MLX5_REGEX_UMR_WQE_SIZE);
	wqe->fm_ce_se |= MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE;
}
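
/*
 * For reference (derived from the code above, not from the PRM): the 9 DS
 * (144B) of the UMR WQE are the control segment (16B), the UMR control
 * segment (48B), the mkey context (64B) and the indirect KLM pointer (16B),
 * padded to MLX5_REGEX_UMR_WQE_SIZE (192B, i.e. 3 WQEBBs). Together with
 * the 64B RegEx WQE that follows, one WQE set spans 4 WQEBBs (256B).
 */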

static inline void
prep_nop_regex_wqe_set(struct mlx5_regex_priv *priv, struct mlx5_regex_sq *sq,
		       struct rte_regex_ops *op, struct mlx5_regex_job *job,
		       size_t pi, struct mlx5_klm *klm)
{
	size_t wqe_offset = (pi & (sq_size_get(sq) - 1)) *
			    (MLX5_SEND_WQE_BB << 2);
	struct mlx5_wqe_ctrl_seg *wqe = (struct mlx5_wqe_ctrl_seg *)((uint8_t *)
				(uintptr_t)sq->sq_obj.wqes + wqe_offset);

	/* Clear the WQE memory used as UMR WQE previously. */
	if ((rte_be_to_cpu_32(wqe->opmod_idx_opcode) & 0xff) != MLX5_OPCODE_NOP)
		memset(wqe, 0, MLX5_REGEX_UMR_WQE_SIZE);
	/* UMR WQE size is 9 DS, align the nop WQE to 3 WQEBBs (12 DS). */
	set_wqe_ctrl_seg(wqe, pi * 4, MLX5_OPCODE_NOP, 0, sq->sq_obj.sq->id,
			 0, 12, 0, 0);
	__prep_one(priv, sq, op, job, pi, klm);
}

static inline void
prep_regex_umr_wqe_set(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp,
	 struct mlx5_regex_sq *sq, struct rte_regex_ops **op, size_t nb_ops)
{
	struct mlx5_regex_job *job = NULL;
	size_t sqid = sq->sqn, mkey_job_id = 0;
	size_t left_ops = nb_ops;
	uint32_t klm_num = 0;
	uint32_t len = 0;
	struct mlx5_klm *mkey_klm = NULL;
	struct mlx5_klm klm;

	while (left_ops--)
		rte_prefetch0(op[left_ops]);
	left_ops = nb_ops;
	/*
	 * Build the WQE set in reverse. If the burst consumes multiple
	 * mkeys, building the set in order makes it hard to address the
	 * last mkey's index, since the last RegEx WQE's index is only
	 * known once the build finishes.
	 */
	while (left_ops--) {
		struct rte_mbuf *mbuf = op[left_ops]->mbuf;
		size_t pi = MLX5_REGEX_UMR_SQ_PI_IDX(sq->pi, left_ops);

		if (mbuf->nb_segs > 1) {
			size_t scatter_size = 0;

			if (!mkey_klm_available(mkey_klm, klm_num,
						mbuf->nb_segs)) {
				/*
				 * The mkey's KLM is full, create the UMR
				 * WQE in the next WQE set.
				 */
				if (mkey_klm)
					complete_umr_wqe(qp, sq,
						&qp->jobs[mkey_job_id],
						MLX5_REGEX_UMR_SQ_PI_IDX(pi, 1),
						klm_num, len);
				/*
				 * Get the indirect mkey and KLM array index
				 * from the last WQE set.
				 */
				mkey_job_id = job_id_get(sqid,
							 sq_size_get(sq), pi);
				mkey_klm = qp->jobs[mkey_job_id].imkey_array;
				klm_num = 0;
				len = 0;
			}
			/* Build RegEx WQE's data segment KLM. */
			klm.address = len;
			klm.mkey = rte_cpu_to_be_32
					(qp->jobs[mkey_job_id].imkey->id);
			while (mbuf) {
				/* Build indirect mkey seg's KLM. */
				mkey_klm->mkey = mlx5_regex_addr2mr
						(priv, &qp->mr_ctrl, mbuf);
				mkey_klm->address = rte_cpu_to_be_64
					(rte_pktmbuf_mtod(mbuf, uintptr_t));
				mkey_klm->byte_count = rte_cpu_to_be_32
						(rte_pktmbuf_data_len(mbuf));
				/*
				 * Save the mbuf's total size for the RegEx
				 * input data length.
				 */
				scatter_size += rte_pktmbuf_data_len(mbuf);
				mkey_klm++;
				klm_num++;
				mbuf = mbuf->next;
			}
			len += scatter_size;
			klm.byte_count = scatter_size;
		} else {
			/* The single mbuf case. Build the KLM directly. */
			klm.mkey = mlx5_regex_addr2mr(priv, &qp->mr_ctrl, mbuf);
			klm.address = rte_pktmbuf_mtod(mbuf, uintptr_t);
			klm.byte_count = rte_pktmbuf_data_len(mbuf);
		}
		job = &qp->jobs[job_id_get(sqid, sq_size_get(sq), pi)];
		/*
		 * Build the nop + RegEx WQE set by default. The first nop WQE
		 * will be updated later as a UMR WQE if scattered mbufs exist.
		 */
		prep_nop_regex_wqe_set(priv, sq, op[left_ops], job, pi, &klm);
	}
	/*
	 * Scattered mbufs have been added to the KLM array. Complete the
	 * build of the UMR WQE, updating the first nop WQE as a UMR WQE.
	 */
	if (mkey_klm)
		complete_umr_wqe(qp, sq, &qp->jobs[mkey_job_id], sq->pi,
				 klm_num, len);
	sq->db_pi = MLX5_REGEX_UMR_SQ_PI_IDX(sq->pi, nb_ops - 1);
	sq->pi = MLX5_REGEX_UMR_SQ_PI_IDX(sq->pi, nb_ops);
}
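
/*
 * Illustrative only: for nb_ops = 4 and sq->pi = 8, the loop above builds
 * the WQE sets for pi 11, 10, 9 and 8 (in that order), then db_pi becomes
 * 11 and sq->pi advances to 12 (modulo the quarter index mask).
 */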

uint16_t
mlx5_regexdev_enqueue_gga(struct rte_regexdev *dev, uint16_t qp_id,
			  struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *queue = &priv->qps[qp_id];
	struct mlx5_regex_sq *sq;
	size_t sqid, nb_left = nb_ops, nb_desc;

	while ((sqid = ffs(queue->free_sqs))) {
		sqid--; /* ffs returns 1 for bit 0 */
		sq = &queue->sqs[sqid];
		nb_desc = get_free(sq, priv->has_umr);
		if (nb_desc) {
			/* The ops handled here cannot exceed what is left. */
			if (nb_desc > nb_left)
				nb_desc = nb_left;
			else
				queue->free_sqs &= ~(1 << sqid);
			prep_regex_umr_wqe_set(priv, queue, sq, ops, nb_desc);
			send_doorbell(priv, sq);
			nb_left -= nb_desc;
			if (!nb_left)
				break;
			ops += nb_desc;
		}
	}
	nb_ops -= nb_left;
	queue->pi += nb_ops;
	return nb_ops;
}
#endif

uint16_t
mlx5_regexdev_enqueue(struct rte_regexdev *dev, uint16_t qp_id,
		      struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *queue = &priv->qps[qp_id];
	struct mlx5_regex_sq *sq;
	size_t sqid, job_id, i = 0;

	while ((sqid = ffs(queue->free_sqs))) {
		sqid--; /* ffs returns 1 for bit 0 */
		sq = &queue->sqs[sqid];
		while (get_free(sq, priv->has_umr)) {
			job_id = job_id_get(sqid, sq_size_get(sq), sq->pi);
			prep_one(priv, queue, sq, ops[i], &queue->jobs[job_id]);
			i++;
			if (unlikely(i == nb_ops)) {
				send_doorbell(priv, sq);
				goto out;
			}
		}
		queue->free_sqs &= ~(1 << sqid);
		send_doorbell(priv, sq);
	}

out:
	queue->pi += i;
	return i;
}
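
/*
 * Illustrative usage (application side, not driver code): the enqueue and
 * dequeue callbacks above are reached through the generic regexdev burst
 * API, e.g.:
 *
 *	struct rte_regex_ops *ops[32];
 *	uint16_t n = rte_regexdev_enqueue_burst(dev_id, qp_id, ops, 32);
 *	...
 *	n = rte_regexdev_dequeue_burst(dev_id, qp_id, ops, 32);
 */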

#define MLX5_REGEX_RESP_SZ 8
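
/*
 * Each match tuple in the job output buffer takes MLX5_REGEX_RESP_SZ (8)
 * bytes: rule_id, start_ptr and length, read back in extract_result().
 */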

static inline void
extract_result(struct rte_regex_ops *op, struct mlx5_regex_job *job)
{
	size_t j;
	size_t offset;
	uint16_t status;

	op->user_id = job->user_id;
	op->nb_matches = MLX5_GET_VOLATILE(regexp_metadata, job->metadata +
					   MLX5_REGEX_METADATA_OFF,
					   match_count);
	op->nb_actual_matches = MLX5_GET_VOLATILE(regexp_metadata,
						  job->metadata +
						  MLX5_REGEX_METADATA_OFF,
						  detected_match_count);
	for (j = 0; j < op->nb_matches; j++) {
		offset = MLX5_REGEX_RESP_SZ * j;
		op->matches[j].rule_id =
			MLX5_GET_VOLATILE(regexp_match_tuple,
					  (job->output + offset), rule_id);
		op->matches[j].start_offset =
			MLX5_GET_VOLATILE(regexp_match_tuple,
					  (job->output + offset), start_ptr);
		op->matches[j].len =
			MLX5_GET_VOLATILE(regexp_match_tuple,
					  (job->output + offset), length);
	}
	status = MLX5_GET_VOLATILE(regexp_metadata, job->metadata +
				   MLX5_REGEX_METADATA_OFF,
				   status);
	op->rsp_flags = 0;
	if (status & MLX5_RXP_RESP_STATUS_PMI_SOJ)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_SOJ_F;
	if (status & MLX5_RXP_RESP_STATUS_PMI_EOJ)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_PMI_EOJ_F;
	if (status & MLX5_RXP_RESP_STATUS_MAX_LATENCY)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_SCAN_TIMEOUT_F;
	if (status & MLX5_RXP_RESP_STATUS_MAX_MATCH)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_MATCH_F;
	if (status & MLX5_RXP_RESP_STATUS_MAX_PREFIX)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_MAX_PREFIX_F;
	if (status & MLX5_RXP_RESP_STATUS_MAX_PRI_THREADS)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_RESOURCE_LIMIT_REACHED_F;
	if (status & MLX5_RXP_RESP_STATUS_MAX_SEC_THREADS)
		op->rsp_flags |= RTE_REGEX_OPS_RSP_RESOURCE_LIMIT_REACHED_F;
}

static inline volatile struct mlx5_cqe *
poll_one(struct mlx5_regex_cq *cq)
{
	volatile struct mlx5_cqe *cqe;
	size_t next_cqe_offset;

	next_cqe_offset = (cq->ci & (cq_size_get(cq) - 1));
	cqe = (volatile struct mlx5_cqe *)(cq->cq_obj.cqes + next_cqe_offset);
	rte_io_wmb();

	int ret = check_cqe(cqe, cq_size_get(cq), cq->ci);

	if (unlikely(ret == MLX5_CQE_STATUS_ERR)) {
		DRV_LOG(ERR, "Completion with error on qp 0x%x", 0);
		return NULL;
	}

	if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN))
		return NULL;

	return cqe;
}

/**
 * DPDK callback for dequeue.
 *
 * @param dev
 *   Pointer to the regex dev structure.
 * @param qp_id
 *   The queue to dequeue the traffic from.
 * @param ops
 *   List of regex ops to dequeue.
 * @param nb_ops
 *   Number of ops in ops parameter.
 *
 * @return
 *   Number of packets successfully dequeued (<= nb_ops).
 */
uint16_t
mlx5_regexdev_dequeue(struct rte_regexdev *dev, uint16_t qp_id,
		      struct rte_regex_ops **ops, uint16_t nb_ops)
{
	struct mlx5_regex_priv *priv = dev->data->dev_private;
	struct mlx5_regex_qp *queue = &priv->qps[qp_id];
	struct mlx5_regex_cq *cq = &queue->cq;
	volatile struct mlx5_cqe *cqe;
	size_t i = 0;

	while ((cqe = poll_one(cq))) {
		uint16_t wq_counter
			= (rte_be_to_cpu_16(cqe->wqe_counter) + 1) &
			  MLX5_REGEX_MAX_WQE_INDEX;
		size_t sqid = cqe->rsvd3[2];
		struct mlx5_regex_sq *sq = &queue->sqs[sqid];

		/* In UMR mode, the WQE counter moves per WQE set (4 WQEBBs). */
		if (priv->has_umr)
			wq_counter >>= 2;
		while (sq->ci != wq_counter) {
			if (unlikely(i == nb_ops)) {
				/* Return without updating cq->ci. */
				goto out;
			}
			uint32_t job_id = job_id_get(sqid, sq_size_get(sq),
						     sq->ci);
			extract_result(ops[i], &queue->jobs[job_id]);
			sq->ci = (sq->ci + 1) & (priv->has_umr ?
				 (MLX5_REGEX_MAX_WQE_INDEX >> 2) :
				  MLX5_REGEX_MAX_WQE_INDEX);
			i++;
		}
		cq->ci = (cq->ci + 1) & 0xffffff;
		rte_wmb();
		cq->cq_obj.db_rec[0] = rte_cpu_to_be_32(cq->ci);
		queue->free_sqs |= (1 << sqid);
	}

out:
	queue->ci += i;
	return i;
}

static void
setup_sqs(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *queue)
{
	size_t sqid, entry;
	uint32_t job_id;

	for (sqid = 0; sqid < queue->nb_obj; sqid++) {
		struct mlx5_regex_sq *sq = &queue->sqs[sqid];
		uint8_t *wqe = (uint8_t *)(uintptr_t)sq->sq_obj.wqes;
		for (entry = 0 ; entry < sq_size_get(sq); entry++) {
			job_id = sqid * sq_size_get(sq) + entry;
			struct mlx5_regex_job *job = &queue->jobs[job_id];

			/* Pre-fill the UMR WQE slot with a NOP in advance. */
			if (priv->has_umr) {
				set_wqe_ctrl_seg
					((struct mlx5_wqe_ctrl_seg *)wqe,
					 entry * 2, MLX5_OPCODE_NOP, 0,
					 sq->sq_obj.sq->id, 0, 12, 0, 0);
				wqe += MLX5_REGEX_UMR_WQE_SIZE;
			}
			set_metadata_seg((struct mlx5_wqe_metadata_seg *)
					 (wqe + MLX5_REGEX_WQE_METADATA_OFFSET),
					 0, queue->metadata->lkey,
					 (uintptr_t)job->metadata);
			set_data_seg((struct mlx5_wqe_data_seg *)
				     (wqe + MLX5_REGEX_WQE_SCATTER_OFFSET),
				     MLX5_REGEX_MAX_OUTPUT,
				     queue->outputs->lkey,
				     (uintptr_t)job->output);
			wqe += 64;
		}
		queue->free_sqs |= 1 << sqid;
	}
}

static int
setup_buffers(struct mlx5_regex_priv *priv, struct mlx5_regex_qp *qp)
{
	struct ibv_pd *pd = priv->pd;
	uint32_t i;
	int err;

	void *ptr = rte_calloc(__func__, qp->nb_desc,
			       MLX5_REGEX_METADATA_SIZE,
			       MLX5_REGEX_METADATA_SIZE);
	if (!ptr)
		return -ENOMEM;

	qp->metadata = mlx5_glue->reg_mr(pd, ptr,
					 MLX5_REGEX_METADATA_SIZE * qp->nb_desc,
					 IBV_ACCESS_LOCAL_WRITE);
	if (!qp->metadata) {
		DRV_LOG(ERR, "Failed to register metadata");
		rte_free(ptr);
		return -EINVAL;
	}

	ptr = rte_calloc(__func__, qp->nb_desc,
			 MLX5_REGEX_MAX_OUTPUT,
			 MLX5_REGEX_MAX_OUTPUT);
	if (!ptr) {
		err = -ENOMEM;
		goto err_output;
	}
	qp->outputs = mlx5_glue->reg_mr(pd, ptr,
					MLX5_REGEX_MAX_OUTPUT * qp->nb_desc,
					IBV_ACCESS_LOCAL_WRITE);
	if (!qp->outputs) {
		rte_free(ptr);
		DRV_LOG(ERR, "Failed to register output");
		err = -EINVAL;
		goto err_output;
	}

	if (priv->has_umr) {
		ptr = rte_calloc(__func__, qp->nb_desc, MLX5_REGEX_KLMS_SIZE,
				 MLX5_REGEX_KLMS_SIZE);
		if (!ptr) {
			err = -ENOMEM;
			goto err_imkey;
		}
		qp->imkey_addr = mlx5_glue->reg_mr(pd, ptr,
					MLX5_REGEX_KLMS_SIZE * qp->nb_desc,
					IBV_ACCESS_LOCAL_WRITE);
		if (!qp->imkey_addr) {
			rte_free(ptr);
			DRV_LOG(ERR, "Failed to register KLM buffer");
			err = -EINVAL;
			goto err_imkey;
		}
	}

	/* Distribute buffers to jobs. */
	for (i = 0; i < qp->nb_desc; i++) {
		qp->jobs[i].output =
			(uint8_t *)qp->outputs->addr +
			(i % qp->nb_desc) * MLX5_REGEX_MAX_OUTPUT;
		qp->jobs[i].metadata =
			(uint8_t *)qp->metadata->addr +
			(i % qp->nb_desc) * MLX5_REGEX_METADATA_SIZE;
		if (qp->imkey_addr)
			qp->jobs[i].imkey_array = (struct mlx5_klm *)
				qp->imkey_addr->addr +
				(i % qp->nb_desc) * MLX5_REGEX_MAX_KLM_NUM;
	}

	return 0;

err_imkey:
	/* Deregister before freeing the memory the MR still covers. */
	ptr = qp->outputs->addr;
	mlx5_glue->dereg_mr(qp->outputs);
	rte_free(ptr);
err_output:
	ptr = qp->metadata->addr;
	mlx5_glue->dereg_mr(qp->metadata);
	rte_free(ptr);
	return err;
}

int
mlx5_regexdev_setup_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id)
{
	struct mlx5_regex_qp *qp = &priv->qps[qp_id];
	struct mlx5_klm klm = { 0 };
	struct mlx5_devx_mkey_attr attr = {
		.klm_array = &klm,
		.klm_num = 1,
		.umr_en = 1,
	};
	uint32_t i;
	int err = 0;

	qp->jobs = rte_calloc(__func__, qp->nb_desc, sizeof(*qp->jobs), 64);
	if (!qp->jobs)
		return -ENOMEM;
	err = setup_buffers(priv, qp);
	if (err) {
		rte_free(qp->jobs);
		return err;
	}

	setup_sqs(priv, qp);

	if (priv->has_umr) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		if (regex_get_pdn(priv->pd, &attr.pd)) {
			err = -rte_errno;
			DRV_LOG(ERR, "Failed to get pdn.");
			mlx5_regexdev_teardown_fastpath(priv, qp_id);
			return err;
		}
#endif
		for (i = 0; i < qp->nb_desc; i++) {
			attr.klm_num = MLX5_REGEX_MAX_KLM_NUM;
			attr.klm_array = qp->jobs[i].imkey_array;
			qp->jobs[i].imkey = mlx5_devx_cmd_mkey_create(priv->ctx,
								      &attr);
			if (!qp->jobs[i].imkey) {
				err = -rte_errno;
				DRV_LOG(ERR, "Failed to allocate imkey.");
				mlx5_regexdev_teardown_fastpath(priv, qp_id);
			}
		}
	}
	return err;
}

static void
free_buffers(struct mlx5_regex_qp *qp)
{
	if (qp->imkey_addr) {
		/* Save the host address: dereg_mr() frees the ibv_mr struct. */
		void *ptr = qp->imkey_addr->addr;

		mlx5_glue->dereg_mr(qp->imkey_addr);
		rte_free(ptr);
	}
	if (qp->metadata) {
		void *ptr = qp->metadata->addr;

		mlx5_glue->dereg_mr(qp->metadata);
		rte_free(ptr);
	}
	if (qp->outputs) {
		void *ptr = qp->outputs->addr;

		mlx5_glue->dereg_mr(qp->outputs);
		rte_free(ptr);
	}
}

void
mlx5_regexdev_teardown_fastpath(struct mlx5_regex_priv *priv, uint32_t qp_id)
{
	struct mlx5_regex_qp *qp = &priv->qps[qp_id];
	uint32_t i;

	if (qp) {
		for (i = 0; i < qp->nb_desc; i++) {
			if (qp->jobs[i].imkey)
				claim_zero(mlx5_devx_cmd_destroy
							(qp->jobs[i].imkey));
		}
		free_buffers(qp);
		if (qp->jobs)
			rte_free(qp->jobs);
	}
}