1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
17 #include <rte_common.h>
18 #include <rte_malloc.h>
19 #include <rte_mempool.h>
21 #include <rte_cycles.h>
22 #include <rte_regexdev.h>
24 #define MAX_FILE_NAME 255
25 #define MBUF_CACHE_SIZE 256
26 #define MBUF_SIZE (1 << 8)
27 #define START_BURST_SIZE 32u
/* NOTE(review): fragment view — enum of CLI option ids and pieces of
 * several struct declarations; their opening/closing lines are outside
 * this excerpt. */
35 ARG_NUM_OF_ITERATIONS,
/* Per-job context: the mbuf carrying one job's payload. */
42 struct rte_mbuf *mbuf;
/* Per-queue-pair state: enqueue/dequeue/match counters plus the
 * per-job ops and context arrays (sized elsewhere by nb_jobs). */
46 uint32_t total_enqueue;
47 uint32_t total_dequeue;
48 uint32_t total_matches;
49 struct rte_regex_ops **ops;
50 struct job_ctx *jobs_ctx;
/* Mapping of one worker lcore to its assigned range of queue pairs. */
56 struct qps_per_lcore {
57 unsigned int lcore_id;
/* NOTE(review): the next two fields appear to belong to a different
 * struct (the per-lcore run configuration passed to run_regex());
 * its opening line is not visible here — confirm against full file. */
66 uint32_t nb_iterations;
68 uint8_t nb_max_matches;
/* Print the command-line usage text for the application's post-"--"
 * options.  prog_name: typically argv[0], substituted into the banner.
 * (Return type and the printf argument/closing lines are outside this
 * excerpt.) */
78 usage(const char *prog_name)
80 printf("%s [EAL options] --\n"
81 " --rules NAME: precompiled rules file\n"
82 " --data NAME: data file to use\n"
83 " --nb_jobs: number of jobs to use\n"
84 " --perf N: only outputs the performance data\n"
85 " --nb_iter N: number of iteration to run\n"
86 " --nb_qps N: number of queues to use\n"
87 " --nb_lcores N: number of lcores to use\n"
88 " --nb_segs N: number of mbuf segments\n",
/* Parse the application's (post-"--") command-line options with
 * getopt_long() and write the results through the output pointers.
 * File names are bounded copies into caller-provided MAX_FILE_NAME
 * buffers; unknown options terminate the process via rte_exit().
 *
 * NOTE(review): strnlen(optarg, MAX_FILE_NAME - 1) can never return
 * MAX_FILE_NAME, so the "len == MAX_FILE_NAME" overlong-name checks
 * below are unreachable as written — confirm against the full file
 * (the intended cap was likely strnlen(optarg, MAX_FILE_NAME)).
 * NOTE(review): numeric options use atoi(), so malformed input
 * silently becomes 0; strtol() would allow error detection. */
93 args_parse(int argc, char **argv, char *rules_file, char *data_file,
94 uint32_t *nb_jobs, bool *perf_mode, uint32_t *nb_iterations,
95 uint32_t *nb_qps, uint32_t *nb_lcores, uint32_t *nb_segs)
/* Long-option table: third field 1 = option takes an argument. */
101 static struct option lgopts[] = {
102 { "help", 0, 0, ARG_HELP},
103 /* Rules database file to load. */
104 { "rules", 1, 0, ARG_RULES_FILE_NAME},
105 /* Data file to load. */
106 { "data", 1, 0, ARG_DATA_FILE_NAME},
107 /* Number of jobs to create. */
108 { "nb_jobs", 1, 0, ARG_NUM_OF_JOBS},
110 { "perf", 0, 0, ARG_PERF_MODE},
111 /* Number of iterations to run with perf test */
112 { "nb_iter", 1, 0, ARG_NUM_OF_ITERATIONS},
114 { "nb_qps", 1, 0, ARG_NUM_OF_QPS},
115 /* Number of lcores. */
116 { "nb_lcores", 1, 0, ARG_NUM_OF_LCORES},
117 /* Number of mbuf segments. */
118 { "nb_segs", 1, 0, ARG_NUM_OF_MBUF_SEGS},
/* No short options ("" optstring); iterate until getopt_long is done. */
124 while ((opt = getopt_long(argc, argvopt, "",
125 lgopts, &opt_idx)) != EOF) {
127 case ARG_RULES_FILE_NAME:
128 len = strnlen(optarg, MAX_FILE_NAME - 1);
129 if (len == MAX_FILE_NAME)
130 rte_exit(EXIT_FAILURE,
131 "Rule file name to long max %d\n",
/* Bounded copy; relies on the size cap for NUL termination. */
133 strncpy(rules_file, optarg, MAX_FILE_NAME - 1);
135 case ARG_DATA_FILE_NAME:
136 len = strnlen(optarg, MAX_FILE_NAME - 1);
137 if (len == MAX_FILE_NAME)
138 rte_exit(EXIT_FAILURE,
139 "Data file name to long max %d\n",
141 strncpy(data_file, optarg, MAX_FILE_NAME - 1);
143 case ARG_NUM_OF_JOBS:
144 *nb_jobs = atoi(optarg);
149 case ARG_NUM_OF_ITERATIONS:
150 *nb_iterations = atoi(optarg);
153 *nb_qps = atoi(optarg);
155 case ARG_NUM_OF_LCORES:
156 *nb_lcores = atoi(optarg);
158 case ARG_NUM_OF_MBUF_SEGS:
159 *nb_segs = atoi(optarg);
/* Unknown/invalid option: fatal. */
166 rte_exit(EXIT_FAILURE, "Invalid option: %s\n", argv[optind]);
/* Load an entire file into a freshly allocated buffer.
 * file: path to open (text mode "r"); *buf: receives an rte_malloc()'d,
 * 4 KB-aligned buffer of file-size + 1 bytes owned by the caller.
 * Sizes the file via fseek(SEEK_END), rewinds, then fread()s it whole.
 * NOTE(review): callers treat the return value as the byte count read
 * (negative/zero on failure); the return statements are not visible in
 * this excerpt — confirm against full file. */
176 read_file(char *file, char **buf)
183 fp = fopen(file, "r");
186 if (fseek(fp, 0L, SEEK_END) == 0) {
/* +1 leaves room for a terminating NUL for text use of the buffer. */
192 *buf = rte_malloc(NULL, sizeof(char) * (buf_len + 1), 4096);
197 if (fseek(fp, 0L, SEEK_SET) != 0) {
201 read_len = fread(*buf, sizeof(char), buf_len, fp);
/* Short read is treated as an error. */
202 if (read_len != (unsigned long)buf_len) {
210 printf("Error, can't open file %s\n, err = %d", file, res);
/* Duplicate data_buf into a new 4 KB-aligned rte_malloc() buffer.
 * Copies data_len + 1 bytes (payload plus the trailing NUL written by
 * read_file()).  Caller owns the clone; each worker QP gets its own
 * copy so external mbuf attachments do not share one buffer. */
218 clone_buf(char *data_buf, char **buf, long data_len)
222 rte_malloc(NULL, sizeof(char) * (data_len + 1), 4096);
225 memcpy(dest_buf, data_buf, data_len + 1);
/* Probe and configure every regex device in the system.
 * Reads the precompiled rule database from rules_file, then for each
 * device: queries capabilities, configures it (rule DB, max matches,
 * optional match-as-end flag) and sets up nb_qps queue pairs
 * (out-of-order completion enabled when the device supports it).
 * Outputs: *nb_max_matches / *nb_max_payload are taken from the
 * device info (last device probed wins when several are present). */
231 init_port(uint16_t *nb_max_payload, char *rules_file, uint8_t *nb_max_matches,
239 struct rte_regexdev_info info;
240 struct rte_regexdev_config dev_conf = {
241 .nb_queue_pairs = nb_qps,
244 struct rte_regexdev_qp_conf qp_conf = {
250 num_devs = rte_regexdev_count();
252 printf("Error, no devices detected.\n");
/* Rule database is loaded once and shared by all devices. */
256 rules_len = read_file(rules_file, &rules);
258 printf("Error, can't read rules files.\n");
263 for (id = 0; id < num_devs; id++) {
264 res = rte_regexdev_info_get(id, &info);
266 printf("Error, can't get device info.\n");
269 printf(":: initializing dev: %d\n", id);
270 *nb_max_matches = info.max_matches;
271 *nb_max_payload = info.max_payload_size;
/* Prefer end-offset match reporting when the device offers it. */
272 if (info.regexdev_capa & RTE_REGEXDEV_SUPP_MATCH_AS_END_F)
273 dev_conf.dev_cfg_flags |=
274 RTE_REGEXDEV_CFG_MATCH_AS_END_F;
275 dev_conf.nb_max_matches = info.max_matches;
276 dev_conf.nb_rules_per_group = info.max_rules_per_group;
277 dev_conf.rule_db_len = rules_len;
278 dev_conf.rule_db = rules;
279 res = rte_regexdev_configure(id, &dev_conf);
281 printf("Error, can't configure device %d.\n", id);
/* Allow out-of-order completions if supported (higher throughput). */
284 if (info.regexdev_capa & RTE_REGEXDEV_CAPA_QUEUE_PAIR_OOS_F)
285 qp_conf.qp_conf_flags |=
286 RTE_REGEX_QUEUE_PAIR_CFG_OOS_F;
287 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
288 res = rte_regexdev_queue_pair_setup(id, qp_id,
291 printf("Error, can't setup queue pair %u for "
292 "device %d.\n", qp_id, id);
296 printf(":: initializing device: %d done\n", id);
/* No-op free callback for rte_pktmbuf_attach_extbuf(): the attached
 * payload lives in a buffer that run_regex() presumably frees itself,
 * so the mbuf layer must not release it — TODO confirm the cleanup
 * path in the elided lines. */
306 extbuf_free_cb(void *addr __rte_unused, void *fcb_opaque __rte_unused)
/* Build a chained (multi-segment) mbuf of pkt_len bytes copied from buf.
 * mbuf_pool: pool to allocate segments from; nb_segs: desired segment
 * count — each segment carries ceil(pkt_len / nb_segs) bytes (at least
 * 1).  Returns the head mbuf, or frees the partial chain and (per the
 * error paths) bails out on allocation/append failure. */
310 static inline struct rte_mbuf *
311 regex_create_segmented_mbuf(struct rte_mempool *mbuf_pool, int pkt_len,
312 int nb_segs, void *buf) {
314 struct rte_mbuf *m = NULL, *mbuf = NULL;
/* Validate arguments before touching the pool. */
322 printf("Packet size must be 1 or more (is %d)\n", pkt_len);
327 printf("Number of segments must be 1 or more (is %d)\n",
/* Per-segment length: ceil(pkt_len / nb_segs), floor of 1. */
332 t_len = pkt_len >= nb_segs ? (pkt_len / nb_segs +
333 !!(pkt_len % nb_segs)) : 1;
336 /* Create chained mbuf_src and fill it with buf data */
337 for (i = 0; size > 0; i++) {
339 m = rte_pktmbuf_alloc(mbuf_pool);
344 printf("Cannot create segment for source mbuf");
/* Last segment may be shorter than t_len. */
348 data_len = size > t_len ? t_len : size;
/* Zero the segment first so trailing bytes are deterministic. */
349 memset(rte_pktmbuf_mtod(m, uint8_t *), 0,
350 rte_pktmbuf_tailroom(m));
351 memcpy(rte_pktmbuf_mtod(m, uint8_t *), src, data_len);
352 dst = (uint8_t *)rte_pktmbuf_append(m, data_len);
354 printf("Cannot append %d bytes to the mbuf\n",
/* Link this segment onto the chain headed by 'mbuf'. */
360 rte_pktmbuf_chain(mbuf, m);
/* Error path: release whatever part of the chain was built. */
368 rte_pktmbuf_free(mbuf);
/* Per-lcore worker launched by main(): runs the regex workload on this
 * core's queue-pair range [qp_id_base, qp_id_base + nb_qps).
 * args: a struct regex_conf describing the data buffer, job sizing and
 * iteration count.  Splits the data into jobs of job_len bytes, wraps
 * each job in an mbuf (segmented copy, or zero-copy external buffer
 * when nb_segs == 1), then enqueues/dequeues bursts for nb_iterations
 * rounds, finally reporting either perf numbers or per-job matches. */
373 run_regex(void *args)
375 struct regex_conf *rgxc = args;
376 uint32_t nb_jobs = rgxc->nb_jobs;
377 uint32_t nb_segs = rgxc->nb_segs;
378 uint32_t nb_iterations = rgxc->nb_iterations;
379 uint8_t nb_max_matches = rgxc->nb_max_matches;
380 uint32_t nb_qps = rgxc->nb_qps;
381 uint16_t qp_id_base = rgxc->qp_id_base;
382 char *data_buf = rgxc->data_buf;
383 long data_len = rgxc->data_len;
384 long job_len = rgxc->job_len;
387 uint32_t actual_jobs = 0;
392 struct rte_regexdev_match *match;
394 unsigned long d_ind = 0;
395 struct rte_mbuf_ext_shared_info shinfo;
398 struct rte_mempool *mbuf_mp;
399 struct qp_params *qp;
400 struct qp_params *qps = NULL;
402 uint16_t qps_used = 0;
/* External-buffer mbufs must not free the payload (see extbuf_free_cb). */
405 shinfo.free_cb = extbuf_free_cb;
/* Pool name is made unique per worker via its qp_id_base. */
408 "mbuf_pool_%2u", qp_id_base);
/* Element size: fixed MBUF_SIZE for the single-segment (ext-buf) case;
 * otherwise sized from job_len so each chain segment fits. */
409 mbuf_mp = rte_pktmbuf_pool_create(mbuf_pool,
410 rte_align32pow2(nb_jobs * nb_qps * nb_segs),
411 0, 0, (nb_segs == 1) ? MBUF_SIZE :
412 (rte_align32pow2(job_len) / nb_segs +
413 RTE_PKTMBUF_HEADROOM),
415 if (mbuf_mp == NULL) {
416 printf("Error, can't create memory pool\n");
420 qps = rte_malloc(NULL, sizeof(*qps) * nb_qps, 0);
422 printf("Error, can't allocate memory for QPs\n");
/* Per-QP setup: ops array, job contexts, and a private data clone. */
427 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
428 struct rte_regex_ops **ops;
429 struct job_ctx *jobs_ctx;
435 qp->ops = ops = rte_malloc(NULL, sizeof(*ops) * nb_jobs, 0);
437 printf("Error, can't allocate memory for ops.\n");
442 qp->jobs_ctx = jobs_ctx =
443 rte_malloc(NULL, sizeof(*jobs_ctx) * nb_jobs, 0);
445 printf("Error, can't allocate memory for jobs_ctx.\n");
/* Each QP attaches mbufs to its own clone of the input data. */
450 if (clone_buf(data_buf, &buf, data_len)) {
451 printf("Error, can't clone buf.\n");
456 /* Assign each mbuf with the data to handle. */
459 /* Allocate the jobs and assign each job with an mbuf. */
460 for (i = 0; (pos < data_len) && (i < nb_jobs) ; i++) {
/* Final job may be shorter than job_len. */
461 long act_job_len = RTE_MIN(job_len, data_len - pos);
/* Op is over-allocated to hold up to nb_max_matches match slots. */
463 ops[i] = rte_malloc(NULL, sizeof(*ops[0]) +
465 sizeof(struct rte_regexdev_match), 0);
467 printf("Error, can't allocate "
/* Multi-segment path: copy the job into a chained mbuf. */
473 ops[i]->mbuf = regex_create_segmented_mbuf
474 (mbuf_mp, act_job_len,
/* Single-segment path: zero-copy attach into the cloned buffer. */
477 ops[i]->mbuf = rte_pktmbuf_alloc(mbuf_mp);
479 rte_pktmbuf_attach_extbuf(ops[i]->mbuf,
480 &buf[pos], 0, act_job_len, &shinfo);
/* NOTE(review): data_len is set to the full job_len while pkt_len is
 * act_job_len — for the last (short) job these disagree and data_len
 * can exceed the attached bytes; confirm intended behavior. */
481 ops[i]->mbuf->data_len = job_len;
482 ops[i]->mbuf->pkt_len = act_job_len;
486 printf("Error, can't add mbuf.\n");
/* Remember the mbuf for cleanup; default rule group is group 1. */
491 jobs_ctx[i].mbuf = ops[i]->mbuf;
493 ops[i]->group_id0 = 1;
499 qp->total_matches = 0;
/* Main measurement loop: nb_iterations passes over all jobs per QP. */
504 for (i = 0; i < nb_iterations; i++) {
505 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
507 qp->total_enqueue = 0;
508 qp->total_dequeue = 0;
/* Round-robin enqueue across QPs until each has submitted all jobs. */
512 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
514 if (qp->total_dequeue < actual_jobs) {
515 qp->start = rte_rdtsc_precise();
516 struct rte_regex_ops **
517 cur_ops_to_enqueue = qp->ops +
520 if (actual_jobs - qp->total_enqueue)
522 rte_regexdev_enqueue_burst
/* Matching dequeue pass; per-QP cycle counter accumulates the
 * TSC delta since this QP's enqueue timestamp. */
530 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
532 if (qp->total_dequeue < actual_jobs) {
533 struct rte_regex_ops **
534 cur_ops_to_dequeue = qp->ops +
538 rte_regexdev_dequeue_burst
545 (rte_rdtsc_precise() - qp->start);
/* perf mode: report throughput per QP in Gbps. */
551 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
553 time = (long double)qp->cycles / rte_get_timer_hz();
554 printf("Core=%u QP=%u Job=%ld Bytes Time=%Lf sec Perf=%Lf "
555 "Gbps\n", rte_lcore_id(), qp_id + qp_id_base,
557 (((double)actual_jobs * job_len * nb_iterations * 8)
558 / time) / 1000000000.0);
/* verbose mode: dump per-job and aggregate match results. */
563 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
564 printf("\n############ Core=%u QP=%u ############\n",
565 rte_lcore_id(), qp_id + qp_id_base);
567 /* Log results per job. */
568 for (d_ind = 0; d_ind < qp->total_dequeue; d_ind++) {
/* NOTE(review): nb_matches/matches use index d_ind % actual_jobs but
 * user_id below uses raw d_ind — inconsistent when total_dequeue
 * exceeds actual_jobs; confirm which indexing is intended. */
569 nb_matches = qp->ops[d_ind % actual_jobs]->nb_matches;
570 printf("Job id %"PRIu64" number of matches = %d\n",
571 qp->ops[d_ind]->user_id, nb_matches);
572 qp->total_matches += nb_matches;
573 match = qp->ops[d_ind % actual_jobs]->matches;
574 for (i = 0; i < nb_matches; i++) {
575 printf("match %d, rule = %d, "
576 "start = %d,len = %d\n",
577 i, match->rule_id, match->start_offset,
582 printf("Total matches = %d\n", qp->total_matches);
583 printf("All Matches:\n");
584 /* Log absolute results. */
585 for (d_ind = 0; d_ind < qp->total_dequeue; d_ind++) {
586 nb_matches = qp->ops[d_ind % actual_jobs]->nb_matches;
587 qp->total_matches += nb_matches;
588 match = qp->ops[d_ind % actual_jobs]->matches;
589 for (i = 0; i < nb_matches; i++) {
590 printf("start = %ld, len = %d, rule = %d\n",
591 match->start_offset +
593 match->len, match->rule_id);
/* Cleanup: free only the QPs that were fully set up (qps_used). */
599 for (qp_id = 0; qp_id < qps_used; qp_id++) {
601 for (i = 0; i < actual_jobs && qp->ops; i++)
602 rte_free(qp->ops[i]);
605 for (i = 0; i < actual_jobs && qp->jobs_ctx; i++)
606 rte_pktmbuf_free(qp->jobs_ctx[i].mbuf);
607 rte_free(qp->jobs_ctx);
612 rte_mempool_free(mbuf_mp);
/* Partition nb_qps queue pairs across nb_cores worker lcores.
 * On success *qpl points to an rte_malloc()'d array of one entry per
 * core, each holding its lcore id, NUMA socket, first QP id and QP
 * count; the first nb_qps % nb_cores cores get one extra QP.  If fewer
 * QPs than cores are requested, the core count is reduced to match.
 * Caller (main) owns and frees *qpl. */
618 distribute_qps_to_lcores(uint32_t nb_cores, uint32_t nb_qps,
619 struct qps_per_lcore **qpl)
626 struct qps_per_lcore *qps_per_lcore;
627 uint32_t detected_lcores;
629 if (nb_qps < nb_cores) {
631 printf("Reducing number of cores to number of QPs (%u)\n",
634 /* Allocate qps_per_lcore array */
636 rte_malloc(NULL, sizeof(*qps_per_lcore) * nb_cores, 0);
638 rte_exit(EXIT_FAILURE, "Failed to create qps_per_lcore array\n");
639 *qpl = qps_per_lcore;
/* Walk available worker lcores until nb_cores have been assigned. */
643 RTE_LCORE_FOREACH_WORKER(lcore_id) {
644 if (detected_lcores >= nb_cores)
646 qps_per_lcore[detected_lcores].lcore_id = lcore_id;
647 socket = rte_lcore_to_socket_id(lcore_id);
648 if (socket == SOCKET_ID_ANY)
650 qps_per_lcore[detected_lcores].socket = socket;
/* Contiguous QP range [min_qp_id, max_qp_id] for this core; the
 * remainder QPs are spread one-per-core over the first cores. */
651 qps_per_lcore[detected_lcores].qp_id_base = min_qp_id;
652 max_qp_id = min_qp_id + nb_qps / nb_cores - 1;
653 if (nb_qps % nb_cores > detected_lcores)
655 qps_per_lcore[detected_lcores].nb_qps = max_qp_id -
657 min_qp_id = max_qp_id + 1;
/* Fail if fewer worker lcores were found than requested. */
660 if (detected_lcores != nb_cores)
/* Diagnostic dump of the final core -> QP-range mapping. */
663 for (i = 0; i < detected_lcores; i++) {
664 printf("===> Core %d: allocated queues: ",
665 qps_per_lcore[i].lcore_id);
666 min_qp_id = qps_per_lcore[i].qp_id_base;
668 qps_per_lcore[i].qp_id_base + qps_per_lcore[i].nb_qps;
669 while (min_qp_id < max_qp_id) {
670 printf("%u ", min_qp_id);
/* Entry point: initialize EAL, parse options, configure regex devices,
 * split the input data into jobs, then launch one run_regex() worker
 * per lcore over its assigned queue pairs and wait for completion. */
679 main(int argc, char **argv)
681 char rules_file[MAX_FILE_NAME];
682 char data_file[MAX_FILE_NAME];
683 uint32_t nb_jobs = 0;
685 uint32_t nb_iterations = 0;
687 uint16_t nb_max_payload = 0;
688 uint8_t nb_max_matches = 0;
693 uint32_t nb_lcores = 1, nb_segs = 1;
694 struct regex_conf *rgxc;
696 struct qps_per_lcore *qps_per_lcore;
699 ret = rte_eal_init(argc, argv);
701 rte_exit(EXIT_FAILURE, "EAL init failed\n");
/* Application options follow the EAL "--" separator. */
705 args_parse(argc, argv, rules_file, data_file, &nb_jobs,
706 &perf_mode, &nb_iterations, &nb_qps,
707 &nb_lcores, &nb_segs);
710 rte_exit(EXIT_FAILURE, "Number of QPs must be greater than 0\n");
712 rte_exit(EXIT_FAILURE, "Number of lcores must be greater than 0\n");
713 if (distribute_qps_to_lcores(nb_lcores, nb_qps, &qps_per_lcore) < 0)
714 rte_exit(EXIT_FAILURE, "Failed to distribute queues to lcores!\n");
715 ret = init_port(&nb_max_payload, rules_file,
716 &nb_max_matches, nb_qps);
718 rte_exit(EXIT_FAILURE, "init port failed\n");
720 data_len = read_file(data_file, &data_buf);
722 rte_exit(EXIT_FAILURE, "Error, can't read file, or file is empty.\n");
/* Job size = input size / job count; guarded by the checks below. */
724 job_len = data_len / nb_jobs;
726 rte_exit(EXIT_FAILURE, "Error, To many jobs, for the given input.\n");
/* NOTE(review): this rejects jobs larger than the device's max payload
 * (fix: increase nb_jobs); the message wording reflects that remedy. */
728 if (job_len > nb_max_payload)
729 rte_exit(EXIT_FAILURE, "Error, not enough jobs to cover input.\n");
731 rgxc = rte_malloc(NULL, sizeof(*rgxc) * nb_lcores, 0);
733 rte_exit(EXIT_FAILURE, "Failed to create Regex Conf\n");
/* One regex_conf per worker; workers share data_buf read-only (each
 * clones it internally) and differ only in their QP range. */
734 for (i = 0; i < nb_lcores; i++) {
735 rgxc[i] = (struct regex_conf){
738 .perf_mode = perf_mode,
739 .nb_iterations = nb_iterations,
740 .nb_max_matches = nb_max_matches,
741 .nb_qps = qps_per_lcore[i].nb_qps,
742 .qp_id_base = qps_per_lcore[i].qp_id_base,
743 .data_buf = data_buf,
744 .data_len = data_len,
747 rte_eal_remote_launch(run_regex, &rgxc[i],
748 qps_per_lcore[i].lcore_id);
/* Block until every launched worker returns. */
750 rte_eal_mp_wait_lcore();
753 rte_free(qps_per_lcore);