1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
17 #include <rte_common.h>
18 #include <rte_malloc.h>
19 #include <rte_mempool.h>
21 #include <rte_cycles.h>
22 #include <rte_regexdev.h>
24 #define MAX_FILE_NAME 255
25 #define MBUF_CACHE_SIZE 256
26 #define MBUF_SIZE (1 << 8)
27 #define START_BURST_SIZE 32u
35 ARG_NUM_OF_ITERATIONS,
42 struct rte_mbuf *mbuf;
46 uint32_t total_enqueue;
47 uint32_t total_dequeue;
48 uint32_t total_matches;
49 struct rte_regex_ops **ops;
50 struct job_ctx *jobs_ctx;
56 struct qps_per_lcore {
57 unsigned int lcore_id;
66 uint32_t nb_iterations;
68 uint8_t nb_max_matches;
/* Print command-line usage for the regex test application (app args after the EAL "--" separator). */
78 usage(const char *prog_name)
80 printf("%s [EAL options] --\n"
81 " --rules NAME: precompiled rules file\n"
82 " --data NAME: data file to use\n"
83 " --nb_jobs: number of jobs to use\n"
84 " --perf N: only outputs the performance data\n"
85 " --nb_iter N: number of iteration to run\n"
86 " --nb_qps N: number of queues to use\n"
87 " --nb_lcores N: number of lcores to use\n"
88 " --nb_segs N: number of mbuf segments\n",
/*
 * Parse application arguments (everything after the EAL "--" separator)
 * into the caller-provided out-parameters. Exits the process on any
 * invalid option or overlong file name.
 */
93 args_parse(int argc, char **argv, char *rules_file, char *data_file,
94 uint32_t *nb_jobs, bool *perf_mode, uint32_t *nb_iterations,
95 uint32_t *nb_qps, uint32_t *nb_lcores, uint32_t *nb_segs)
/* Long-option table: each entry maps an option name to its ARG_* id. */
101 static struct option lgopts[] = {
102 { "help", 0, 0, ARG_HELP},
103 /* Rules database file to load. */
104 { "rules", 1, 0, ARG_RULES_FILE_NAME},
105 /* Data file to load. */
106 { "data", 1, 0, ARG_DATA_FILE_NAME},
107 /* Number of jobs to create. */
108 { "nb_jobs", 1, 0, ARG_NUM_OF_JOBS},
/* Perf mode: suppress per-match logging, print throughput only. */
110 { "perf", 0, 0, ARG_PERF_MODE},
111 /* Number of iterations to run with perf test */
112 { "nb_iter", 1, 0, ARG_NUM_OF_ITERATIONS},
/* Number of queue pairs to use. */
114 { "nb_qps", 1, 0, ARG_NUM_OF_QPS},
115 /* Number of lcores. */
116 { "nb_lcores", 1, 0, ARG_NUM_OF_LCORES},
117 /* Number of mbuf segments. */
118 { "nb_segs", 1, 0, ARG_NUM_OF_MBUF_SEGS},
/* No short options: everything is matched through lgopts. */
124 while ((opt = getopt_long(argc, argvopt, "",
125 lgopts, &opt_idx)) != EOF) {
127 case ARG_RULES_FILE_NAME:
/*
 * NOTE(review): strnlen() is bounded by MAX_FILE_NAME - 1, so
 * len can never equal MAX_FILE_NAME and the overlong-name check
 * below is dead code. The bound should likely be MAX_FILE_NAME.
 */
128 len = strnlen(optarg, MAX_FILE_NAME - 1);
129 if (len == MAX_FILE_NAME)
130 rte_exit(EXIT_FAILURE,
131 "Rule file name to long max %d\n",
/* strncpy with a size of MAX_FILE_NAME - 1 leaves room for the NUL. */
133 strncpy(rules_file, optarg, MAX_FILE_NAME - 1);
135 case ARG_DATA_FILE_NAME:
/* NOTE(review): same dead length check as the rules-file case above. */
136 len = strnlen(optarg, MAX_FILE_NAME - 1);
137 if (len == MAX_FILE_NAME)
138 rte_exit(EXIT_FAILURE,
139 "Data file name to long max %d\n",
141 strncpy(data_file, optarg, MAX_FILE_NAME - 1);
143 case ARG_NUM_OF_JOBS:
/*
 * NOTE(review): atoi() returns 0 on malformed input with no error
 * indication (here and for the numeric options below); strtol()
 * with endptr/errno checking would reject bad values explicitly.
 */
144 *nb_jobs = atoi(optarg);
149 case ARG_NUM_OF_ITERATIONS:
150 *nb_iterations = atoi(optarg);
153 *nb_qps = atoi(optarg);
155 case ARG_NUM_OF_LCORES:
156 *nb_lcores = atoi(optarg);
158 case ARG_NUM_OF_MBUF_SEGS:
159 *nb_segs = atoi(optarg);
/* Any unrecognized option aborts the run. */
166 rte_exit(EXIT_FAILURE, "Invalid option: %s\n", argv[optind]);
/*
 * Load the whole file into a freshly rte_malloc'd, 4 KiB-aligned buffer
 * (*buf). Size is discovered via fseek(SEEK_END); one extra byte is
 * allocated beyond the file length (presumably for a NUL terminator —
 * confirm against the elided lines). Caller owns and must rte_free *buf.
 */
176 read_file(char *file, char **buf)
183 fp = fopen(file, "r");
186 if (fseek(fp, 0L, SEEK_END) == 0) {
192 *buf = rte_malloc(NULL, sizeof(char) * (buf_len + 1), 4096);
/* Rewind before reading the data back in. */
197 if (fseek(fp, 0L, SEEK_SET) != 0) {
201 read_len = fread(*buf, sizeof(char), buf_len, fp);
/* Short read is treated as an error. */
202 if (read_len != (unsigned long)buf_len) {
/*
 * NOTE(review): the format string puts '\n' before ", err = %d",
 * breaking the message across lines mid-sentence — likely meant
 * "...%s, err = %d\n".
 */
210 printf("Error, can't open file %s\n, err = %d", file, res);
/*
 * Duplicate data_buf into a new 4 KiB-aligned rte_malloc'd buffer so each
 * worker lcore gets a private copy of the input data.
 */
219 clone_buf(char *data_buf, char **buf, long data_len)
223 rte_malloc(NULL, sizeof(char) * (data_len + 1), 4096);
/* Copies data_len + 1 bytes — assumes the source buffer carries a
 * trailing byte (read_file over-allocates by one); TODO confirm. */
226 memcpy(dest_buf, data_buf, data_len + 1);
/*
 * Discover and configure every regex device: load the compiled rule
 * database from rules_file into each device, set up nb_qps queue pairs
 * per device, and report each device's limits back through
 * *nb_max_payload and *nb_max_matches.
 */
232 init_port(uint16_t *nb_max_payload, char *rules_file, uint8_t *nb_max_matches,
240 struct rte_regexdev_info info;
241 struct rte_regexdev_config dev_conf = {
242 .nb_queue_pairs = nb_qps,
245 struct rte_regexdev_qp_conf qp_conf = {
251 num_devs = rte_regexdev_count();
253 printf("Error, no devices detected.\n");
/* Read the precompiled rule database once; shared by all devices. */
257 rules_len = read_file(rules_file, &rules);
259 printf("Error, can't read rules files.\n");
264 for (id = 0; id < num_devs; id++) {
265 res = rte_regexdev_info_get(id, &info);
267 printf("Error, can't get device info.\n");
270 printf(":: initializing dev: %d\n", id);
/* NOTE(review): limits from the last device win if devices differ. */
271 *nb_max_matches = info.max_matches;
272 *nb_max_payload = info.max_payload_size;
/* Prefer match-as-end semantics when the device supports it. */
273 if (info.regexdev_capa & RTE_REGEXDEV_SUPP_MATCH_AS_END_F)
274 dev_conf.dev_cfg_flags |=
275 RTE_REGEXDEV_CFG_MATCH_AS_END_F;
276 dev_conf.nb_max_matches = info.max_matches;
277 dev_conf.nb_rules_per_group = info.max_rules_per_group;
278 dev_conf.rule_db_len = rules_len;
279 dev_conf.rule_db = rules;
280 res = rte_regexdev_configure(id, &dev_conf);
282 printf("Error, can't configure device %d.\n", id);
/* Allow out-of-order scan completion when supported. */
285 if (info.regexdev_capa & RTE_REGEXDEV_CAPA_QUEUE_PAIR_OOS_F)
286 qp_conf.qp_conf_flags |=
287 RTE_REGEX_QUEUE_PAIR_CFG_OOS_F;
288 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
289 res = rte_regexdev_queue_pair_setup(id, qp_id,
292 printf("Error, can't setup queue pair %u for "
293 "device %d.\n", qp_id, id);
297 printf(":: initializing device: %d done\n", id);
/*
 * No-op free callback for rte_pktmbuf_attach_extbuf(): the attached
 * external buffer is the cloned data buffer, whose lifetime is managed
 * by the caller, so nothing is released here.
 */
308 extbuf_free_cb(void *addr __rte_unused, void *fcb_opaque __rte_unused)
/*
 * Build a chained (multi-segment) mbuf of nb_segs segments covering
 * pkt_len bytes, copying the payload from buf into each segment.
 * Returns the head mbuf, or NULL on failure (freeing any partial chain).
 */
312 static inline struct rte_mbuf *
313 regex_create_segmented_mbuf(struct rte_mempool *mbuf_pool, int pkt_len,
314 int nb_segs, void *buf) {
316 struct rte_mbuf *m = NULL, *mbuf = NULL;
324 printf("Packet size must be 1 or more (is %d)\n", pkt_len);
329 printf("Number of segments must be 1 or more (is %d)\n",
/* Per-segment length: ceil(pkt_len / nb_segs), minimum 1 byte. */
334 t_len = pkt_len >= nb_segs ? (pkt_len / nb_segs +
335 !!(pkt_len % nb_segs)) : 1;
338 /* Create chained mbuf_src and fill it with buf data */
339 for (i = 0; size > 0; i++) {
341 m = rte_pktmbuf_alloc(mbuf_pool);
346 printf("Cannot create segment for source mbuf");
/* Last segment may carry fewer than t_len bytes. */
350 data_len = size > t_len ? t_len : size;
/* Zero the segment before copying so any tail padding is clean. */
351 memset(rte_pktmbuf_mtod(m, uint8_t *), 0,
352 rte_pktmbuf_tailroom(m));
353 memcpy(rte_pktmbuf_mtod(m, uint8_t *), src, data_len);
354 dst = (uint8_t *)rte_pktmbuf_append(m, data_len);
356 printf("Cannot append %d bytes to the mbuf\n",
/* Link this segment onto the chain headed by mbuf. */
362 rte_pktmbuf_chain(mbuf, m);
/* Error path: release whatever part of the chain was built. */
371 rte_pktmbuf_free(mbuf);
/*
 * Per-lcore worker: builds one mbuf/op per job from this lcore's private
 * copy of the data, drives enqueue/dequeue bursts over its assigned queue
 * pairs for nb_iterations rounds, then reports either throughput
 * (perf mode) or the per-job match details. Launched via
 * rte_eal_remote_launch with a struct regex_conf argument.
 */
376 run_regex(void *args)
378 struct regex_conf *rgxc = args;
379 uint32_t nb_jobs = rgxc->nb_jobs;
380 uint32_t nb_segs = rgxc->nb_segs;
381 uint32_t nb_iterations = rgxc->nb_iterations;
382 uint8_t nb_max_matches = rgxc->nb_max_matches;
383 uint32_t nb_qps = rgxc->nb_qps;
384 uint16_t qp_id_base = rgxc->qp_id_base;
385 char *data_buf = rgxc->data_buf;
386 long data_len = rgxc->data_len;
387 long job_len = rgxc->job_len;
390 uint32_t actual_jobs = 0;
395 struct rte_regexdev_match *match;
397 unsigned long d_ind = 0;
398 struct rte_mbuf_ext_shared_info shinfo;
401 struct rte_mempool *mbuf_mp;
402 struct qp_params *qp;
403 struct qp_params *qps = NULL;
405 uint16_t qps_used = 0;
/* External-buffer mbufs use a no-op free callback (buffer freed with qp). */
408 shinfo.free_cb = extbuf_free_cb;
/* Pool name is made unique per lcore via its base QP id. */
411 "mbuf_pool_%2u", qp_id_base);
/* Element size: fixed MBUF_SIZE for extbuf mode, or a per-segment
 * payload size when building segmented mbufs. */
412 mbuf_mp = rte_pktmbuf_pool_create(mbuf_pool,
413 rte_align32pow2(nb_jobs * nb_qps * nb_segs),
414 0, 0, (nb_segs == 1) ? MBUF_SIZE :
415 (rte_align32pow2(job_len) / nb_segs +
416 RTE_PKTMBUF_HEADROOM),
418 if (mbuf_mp == NULL) {
419 printf("Error, can't create memory pool\n");
423 qps = rte_malloc(NULL, sizeof(*qps) * nb_qps, 0);
425 printf("Error, can't allocate memory for QPs\n");
/* Per-QP setup: ops array, job contexts, private data copy, jobs. */
430 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
431 struct rte_regex_ops **ops;
432 struct job_ctx *jobs_ctx;
438 qp->ops = ops = rte_malloc(NULL, sizeof(*ops) * nb_jobs, 0);
440 printf("Error, can't allocate memory for ops.\n");
445 qp->jobs_ctx = jobs_ctx =
446 rte_malloc(NULL, sizeof(*jobs_ctx) * nb_jobs, 0);
448 printf("Error, can't allocate memory for jobs_ctx.\n");
/* Each QP scans its own copy of the input data. */
453 if (clone_buf(data_buf, &buf, data_len)) {
454 printf("Error, can't clone buf.\n");
459 /* Assign each mbuf with the data to handle. */
462 /* Allocate the jobs and assign each job with an mbuf. */
463 for (i = 0; (pos < data_len) && (i < nb_jobs) ; i++) {
/* Last job may be shorter than job_len. */
464 long act_job_len = RTE_MIN(job_len, data_len - pos);
/* Op allocated with room for nb_max_matches trailing match entries. */
466 ops[i] = rte_malloc(NULL, sizeof(*ops[0]) +
468 sizeof(struct rte_regexdev_match), 0);
470 printf("Error, can't allocate "
/* Multi-segment path: copy the data into a chained mbuf. */
476 ops[i]->mbuf = regex_create_segmented_mbuf
477 (mbuf_mp, act_job_len,
/* Single-segment path: zero-copy attach of the cloned buffer. */
480 ops[i]->mbuf = rte_pktmbuf_alloc(mbuf_mp);
482 rte_pktmbuf_attach_extbuf(ops[i]->mbuf,
483 &buf[pos], 0, act_job_len, &shinfo);
/*
 * NOTE(review): data_len is set to the full job_len while pkt_len
 * uses act_job_len — for the (shorter) last job these disagree,
 * which looks like a bug; data_len should presumably also be
 * act_job_len. Confirm against the regexdev mbuf contract.
 */
484 ops[i]->mbuf->data_len = job_len;
485 ops[i]->mbuf->pkt_len = act_job_len;
489 printf("Error, can't add mbuf.\n");
494 jobs_ctx[i].mbuf = ops[i]->mbuf;
/* All jobs scan against rule group 1. */
496 ops[i]->group_id0 = 1;
502 qp->total_matches = 0;
/* Main loop: repeat the full enqueue/dequeue cycle nb_iterations times. */
507 for (i = 0; i < nb_iterations; i++) {
508 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
510 qp->total_enqueue = 0;
511 qp->total_dequeue = 0;
/* Enqueue pass: push remaining ops on every QP that is not done. */
515 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
517 if (qp->total_dequeue < actual_jobs) {
518 qp->start = rte_rdtsc_precise();
519 struct rte_regex_ops **
520 cur_ops_to_enqueue = qp->ops +
523 if (actual_jobs - qp->total_enqueue)
525 rte_regexdev_enqueue_burst
/* Dequeue pass: collect completions and accumulate cycle counts. */
533 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
535 if (qp->total_dequeue < actual_jobs) {
536 struct rte_regex_ops **
537 cur_ops_to_dequeue = qp->ops +
541 rte_regexdev_dequeue_burst
548 (rte_rdtsc_precise() - qp->start);
/* Perf report: bytes processed per QP over measured cycles -> Gbps. */
554 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
556 time = (long double)qp->cycles / rte_get_timer_hz();
557 printf("Core=%u QP=%u Job=%ld Bytes Time=%Lf sec Perf=%Lf "
558 "Gbps\n", rte_lcore_id(), qp_id + qp_id_base,
560 (((double)actual_jobs * job_len * nb_iterations * 8)
561 / time) / 1000000000.0);
/* Verbose report (non-perf mode): per-job and aggregate match dumps. */
566 for (qp_id = 0; qp_id < nb_qps; qp_id++) {
567 printf("\n############ Core=%u QP=%u ############\n",
568 rte_lcore_id(), qp_id + qp_id_base);
570 /* Log results per job. */
571 for (d_ind = 0; d_ind < qp->total_dequeue; d_ind++) {
/*
 * NOTE(review): nb_matches and matches index with
 * d_ind % actual_jobs but user_id below indexes with the raw
 * d_ind — inconsistent once d_ind >= actual_jobs; one of the
 * two indexing schemes is wrong. Confirm intent.
 */
572 nb_matches = qp->ops[d_ind % actual_jobs]->nb_matches;
573 printf("Job id %"PRIu64" number of matches = %d\n",
574 qp->ops[d_ind]->user_id, nb_matches);
575 qp->total_matches += nb_matches;
576 match = qp->ops[d_ind % actual_jobs]->matches;
577 for (i = 0; i < nb_matches; i++) {
578 printf("match %d, rule = %d, "
579 "start = %d,len = %d\n",
580 i, match->rule_id, match->start_offset,
585 printf("Total matches = %d\n", qp->total_matches);
586 printf("All Matches:\n");
587 /* Log absolute results. */
/*
 * NOTE(review): total_matches is accumulated again below without
 * being reset after the per-job loop above, so it ends up double
 * counted. Presumably a reset to 0 is missing here — confirm.
 */
588 for (d_ind = 0; d_ind < qp->total_dequeue; d_ind++) {
589 nb_matches = qp->ops[d_ind % actual_jobs]->nb_matches;
590 qp->total_matches += nb_matches;
591 match = qp->ops[d_ind % actual_jobs]->matches;
592 for (i = 0; i < nb_matches; i++) {
593 printf("start = %ld, len = %d, rule = %d\n",
594 match->start_offset +
596 match->len, match->rule_id);
/* Cleanup: free ops, mbufs, and job contexts for every QP set up. */
602 for (qp_id = 0; qp_id < qps_used; qp_id++) {
604 for (i = 0; i < actual_jobs && qp->ops; i++)
605 rte_free(qp->ops[i]);
608 for (i = 0; i < actual_jobs && qp->jobs_ctx; i++)
609 rte_pktmbuf_free(qp->jobs_ctx[i].mbuf);
610 rte_free(qp->jobs_ctx);
616 rte_mempool_free(mbuf_mp);
/*
 * Partition nb_qps queue pairs into contiguous [qp_id_base, qp_id_base +
 * nb_qps) ranges, one range per worker lcore, distributing the remainder
 * of nb_qps / nb_cores one extra QP at a time to the first lcores.
 * Allocates and returns the per-lcore table through *qpl (caller frees).
 */
622 distribute_qps_to_lcores(uint32_t nb_cores, uint32_t nb_qps,
623 struct qps_per_lcore **qpl)
630 struct qps_per_lcore *qps_per_lcore;
631 uint32_t detected_lcores;
/* Never use more cores than there are QPs to hand out. */
633 if (nb_qps < nb_cores) {
635 printf("Reducing number of cores to number of QPs (%u)\n",
638 /* Allocate qps_per_lcore array */
640 rte_malloc(NULL, sizeof(*qps_per_lcore) * nb_cores, 0);
642 rte_exit(EXIT_FAILURE, "Failed to create qps_per_lcore array\n");
643 *qpl = qps_per_lcore;
/* Walk the worker lcores, assigning each the next QP range. */
647 RTE_LCORE_FOREACH_WORKER(lcore_id) {
648 if (detected_lcores >= nb_cores)
650 qps_per_lcore[detected_lcores].lcore_id = lcore_id;
651 socket = rte_lcore_to_socket_id(lcore_id);
652 if (socket == SOCKET_ID_ANY)
654 qps_per_lcore[detected_lcores].socket = socket;
655 qps_per_lcore[detected_lcores].qp_id_base = min_qp_id;
656 max_qp_id = min_qp_id + nb_qps / nb_cores - 1;
/* First (nb_qps % nb_cores) lcores get one extra QP. */
657 if (nb_qps % nb_cores > detected_lcores)
659 qps_per_lcore[detected_lcores].nb_qps = max_qp_id -
661 min_qp_id = max_qp_id + 1;
/* Fail if fewer worker lcores were found than requested. */
664 if (detected_lcores != nb_cores)
/* Report the final assignment for operator visibility. */
667 for (i = 0; i < detected_lcores; i++) {
668 printf("===> Core %d: allocated queues: ",
669 qps_per_lcore[i].lcore_id);
670 min_qp_id = qps_per_lcore[i].qp_id_base;
672 qps_per_lcore[i].qp_id_base + qps_per_lcore[i].nb_qps;
673 while (min_qp_id < max_qp_id) {
674 printf("%u ", min_qp_id);
/*
 * Entry point: init EAL, parse app options, map QPs to lcores, configure
 * the regex devices with the rule database, split the input data into
 * jobs, and launch run_regex on every assigned worker lcore.
 */
683 main(int argc, char **argv)
685 char rules_file[MAX_FILE_NAME];
686 char data_file[MAX_FILE_NAME];
687 uint32_t nb_jobs = 0;
689 uint32_t nb_iterations = 0;
691 uint16_t nb_max_payload = 0;
692 uint8_t nb_max_matches = 0;
697 uint32_t nb_lcores = 1, nb_segs = 1;
698 struct regex_conf *rgxc;
700 struct qps_per_lcore *qps_per_lcore;
703 ret = rte_eal_init(argc, argv);
705 rte_exit(EXIT_FAILURE, "EAL init failed\n");
709 args_parse(argc, argv, rules_file, data_file, &nb_jobs,
710 &perf_mode, &nb_iterations, &nb_qps,
711 &nb_lcores, &nb_segs);
714 rte_exit(EXIT_FAILURE, "Number of QPs must be greater than 0\n");
716 rte_exit(EXIT_FAILURE, "Number of lcores must be greater than 0\n");
717 if (distribute_qps_to_lcores(nb_lcores, nb_qps, &qps_per_lcore) < 0)
718 rte_exit(EXIT_FAILURE, "Failed to distribute queues to lcores!\n");
719 ret = init_port(&nb_max_payload, rules_file,
720 &nb_max_matches, nb_qps);
722 rte_exit(EXIT_FAILURE, "init port failed\n");
724 data_len = read_file(data_file, &data_buf);
726 rte_exit(EXIT_FAILURE, "Error, can't read file, or file is empty.\n");
/* Split the input evenly: each job scans data_len / nb_jobs bytes. */
728 job_len = data_len / nb_jobs;
730 rte_exit(EXIT_FAILURE, "Error, To many jobs, for the given input.\n");
/*
 * NOTE(review): the message below ("not enough jobs to cover input")
 * does not match the condition, which checks that a single job does
 * not exceed the device's max payload size. The message text looks
 * swapped with another check — confirm and fix the string.
 */
732 if (job_len > nb_max_payload)
733 rte_exit(EXIT_FAILURE, "Error, not enough jobs to cover input.\n");
735 rgxc = rte_malloc(NULL, sizeof(*rgxc) * nb_lcores, 0);
737 rte_exit(EXIT_FAILURE, "Failed to create Regex Conf\n");
/* One regex_conf per worker lcore, carrying its QP range and data. */
738 for (i = 0; i < nb_lcores; i++) {
739 rgxc[i] = (struct regex_conf){
742 .perf_mode = perf_mode,
743 .nb_iterations = nb_iterations,
744 .nb_max_matches = nb_max_matches,
745 .nb_qps = qps_per_lcore[i].nb_qps,
746 .qp_id_base = qps_per_lcore[i].qp_id_base,
747 .data_buf = data_buf,
748 .data_len = data_len,
751 rte_eal_remote_launch(run_regex, &rgxc[i],
752 qps_per_lcore[i].lcore_id);
/* Wait for all workers to finish before releasing shared state. */
754 rte_eal_mp_wait_lcore();
757 rte_free(qps_per_lcore);