1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2020 Mellanox Technologies, Ltd
4 * This file contains the application main file.
5 * This application provides the user the ability to test the
6 * insertion rate for specific rte_flow rule under stress state ~4M rule/
8 * Then it will also provide packet per second measurement after installing
9 * all rules, the user may send traffic to test the PPS that match the rules
10 * after all rules are installed, to check performance or functionality after
13 * The flows insertion will go for all ports first, then it will print the
14 * results, after that the application will go into forwarding packets mode
15 * it will start receiving traffic if any and then forwarding it back and
16 * gives packet per second measurement.
32 #include <rte_malloc.h>
33 #include <rte_mempool.h>
35 #include <rte_ethdev.h>
40 #include "actions_gen.h"
/* Upper bound on measured rule batches per run (rules_count / rules_batch). */
43 #define MAX_BATCHES_COUNT 100
/* Default total number of rte_flow rules to insert (~4M, per file header). */
44 #define DEFAULT_RULES_COUNT 4000000
/* Default number of rules per measured batch. */
45 #define DEFAULT_RULES_BATCH 100000
/* Default rte_flow group id used for inserted flows. */
46 #define DEFAULT_GROUP 0
/*
 * File-scope state shared by the option parser and the insertion/forwarding
 * paths.  All of it is written once during args_parse() and then read by the
 * worker code.
 */
48 struct rte_flow *flow;
49 static uint8_t flow_group;
/* Bitmasks of items selected for raw-encap / raw-decap actions. */
51 static uint64_t encap_data;
52 static uint64_t decap_data;
/* Per-color action masks and their textual spec, for --policy-mtr. */
53 static uint64_t all_actions[RTE_COLORS][MAX_ACTIONS_NUM];
54 static char *actions_str[RTE_COLORS];
/* Masks of the items/actions/attributes chosen on the command line. */
56 static uint64_t flow_items[MAX_ITEMS_NUM];
57 static uint64_t flow_actions[MAX_ACTIONS_NUM];
58 static uint64_t flow_attrs[MAX_ATTRS_NUM];
59 static uint32_t policy_id[MAX_PORTS];
60 static uint8_t items_idx, actions_idx, attrs_idx;
62 static uint64_t ports_mask;
63 static uint16_t dst_ports[RTE_MAX_ETHPORTS];
64 static volatile bool force_quit;
65 static bool dump_iterations;
66 static bool delete_flag;
67 static bool dump_socket_mem_flag;
68 static bool enable_fwd;
69 static bool unique_data;
70 static bool policy_mtr;
/* Queue/descriptor/mempool sizing, settable from the command line. */
72 static uint8_t rx_queues_count;
73 static uint8_t tx_queues_count;
74 static uint8_t rxd_count;
75 static uint8_t txd_count;
76 static uint32_t mbuf_size;
77 static uint32_t mbuf_cache_size;
78 static uint32_t total_mbuf_num;
80 static struct rte_mempool *mbuf_mp;
81 static uint32_t nb_lcores;
82 static uint32_t rules_count;
83 static uint32_t rules_batch;
84 static uint32_t hairpin_queues_num; /* total hairpin q number - default: 0 */
/* NOTE(review): a duplicate tentative definition of nb_lcores was removed
 * here; the variable is already declared above. */
86 static uint8_t max_priority;
87 static uint32_t rand_seed;
88 static uint64_t meter_profile_values[3]; /* CIR CBS EBS values. */
/* Forwarding-loop burst size and lcore operating modes. */
90 #define MAX_PKT_BURST 32
91 #define LCORE_MODE_PKT 1
92 #define LCORE_MODE_STATS 2
93 #define MAX_STREAMS 64
/* Opcodes for meters_handler(): create vs. delete meter rules. */
94 #define METER_CREATE 1
95 #define METER_DELETE 2
/* NOTE(review): the opening of the enclosing per-lcore struct (struct
 * lcore_info, presumably) is elided from this listing; only its tail
 * members and closing brace are visible below. */
107 struct stream streams[MAX_STREAMS];
112 struct rte_mbuf *pkts[MAX_PKT_BURST];
113 } __rte_cache_aligned;
/* One entry per possible lcore; indexed by lcore id. */
115 static struct lcore_info lcore_infos[RTE_MAX_LCORE];
/* CPU time spent per port/core on rule insertion and deletion. */
117 struct used_cpu_time {
118 double insertion[MAX_PORTS][RTE_MAX_LCORE];
119 double deletion[MAX_PORTS][RTE_MAX_LCORE];
/* NOTE(review): the closing brace of struct used_cpu_time is elided in this
 * listing. */
/* Aggregate bookkeeping for the multi-core insertion run: core/rule counts,
 * timing records for meters and flows, and per-core allocation snapshots. */
122 struct multi_cores_pool {
123 uint32_t cores_count;
124 uint32_t rules_count;
125 struct used_cpu_time meters_record;
126 struct used_cpu_time flows_record;
127 int64_t last_alloc[RTE_MAX_LCORE];
128 int64_t current_alloc[RTE_MAX_LCORE];
129 } __rte_cache_aligned;
/* Initializer body elided in this listing. */
131 static struct multi_cores_pool mc_pool = {
/*
 * Dictionary mapping command-line option strings to rte_flow masks.
 * Each entry records the mask to OR in, the destination array
 * (flow_items / flow_attrs / flow_actions) and the index counter to bump.
 * NOTE(review): per-entry braces and many .str fields are elided in this
 * numbered listing; only selected member lines are visible.
 */
135 static const struct option_dict {
/* ---- flow item options ---- */
144 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH),
145 .map = &flow_items[0],
146 .map_idx = &items_idx
150 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4),
151 .map = &flow_items[0],
152 .map_idx = &items_idx
156 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6),
157 .map = &flow_items[0],
158 .map_idx = &items_idx
162 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN),
163 .map = &flow_items[0],
164 .map_idx = &items_idx
168 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_TCP),
169 .map = &flow_items[0],
170 .map_idx = &items_idx
174 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP),
175 .map = &flow_items[0],
176 .map_idx = &items_idx
180 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN),
181 .map = &flow_items[0],
182 .map_idx = &items_idx
186 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE),
187 .map = &flow_items[0],
188 .map_idx = &items_idx
192 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE),
193 .map = &flow_items[0],
194 .map_idx = &items_idx
198 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE),
199 .map = &flow_items[0],
200 .map_idx = &items_idx
204 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP),
205 .map = &flow_items[0],
206 .map_idx = &items_idx
210 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_META),
211 .map = &flow_items[0],
212 .map_idx = &items_idx
216 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_TAG),
217 .map = &flow_items[0],
218 .map_idx = &items_idx
222 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ICMP),
223 .map = &flow_items[0],
224 .map_idx = &items_idx
228 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ICMP6),
229 .map = &flow_items[0],
230 .map_idx = &items_idx
/* ---- flow attribute options (masks elided in listing) ---- */
235 .map = &flow_attrs[0],
236 .map_idx = &attrs_idx
241 .map = &flow_attrs[0],
242 .map_idx = &attrs_idx
247 .map = &flow_attrs[0],
248 .map_idx = &attrs_idx
/* ---- flow action options ---- */
252 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
253 .map = &flow_actions[0],
254 .map_idx = &actions_idx
258 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
259 .map = &flow_actions[0],
260 .map_idx = &actions_idx
264 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
265 .map = &flow_actions[0],
266 .map_idx = &actions_idx
270 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
271 .map = &flow_actions[0],
272 .map_idx = &actions_idx
276 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
277 .map = &flow_actions[0],
278 .map_idx = &actions_idx
282 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
283 .map = &flow_actions[0],
284 .map_idx = &actions_idx
288 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
289 .map = &flow_actions[0],
290 .map_idx = &actions_idx
294 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
295 .map = &flow_actions[0],
296 .map_idx = &actions_idx
300 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
301 .map = &flow_actions[0],
302 .map_idx = &actions_idx
305 .str = "set-src-mac",
306 .mask = FLOW_ACTION_MASK(
307 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
309 .map = &flow_actions[0],
310 .map_idx = &actions_idx
313 .str = "set-dst-mac",
314 .mask = FLOW_ACTION_MASK(
315 RTE_FLOW_ACTION_TYPE_SET_MAC_DST
317 .map = &flow_actions[0],
318 .map_idx = &actions_idx
321 .str = "set-src-ipv4",
322 .mask = FLOW_ACTION_MASK(
323 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
325 .map = &flow_actions[0],
326 .map_idx = &actions_idx
329 .str = "set-dst-ipv4",
330 .mask = FLOW_ACTION_MASK(
331 RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
333 .map = &flow_actions[0],
334 .map_idx = &actions_idx
337 .str = "set-src-ipv6",
338 .mask = FLOW_ACTION_MASK(
339 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
341 .map = &flow_actions[0],
342 .map_idx = &actions_idx
345 .str = "set-dst-ipv6",
346 .mask = FLOW_ACTION_MASK(
347 RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
349 .map = &flow_actions[0],
350 .map_idx = &actions_idx
354 .mask = FLOW_ACTION_MASK(
355 RTE_FLOW_ACTION_TYPE_SET_TP_SRC
357 .map = &flow_actions[0],
358 .map_idx = &actions_idx
362 .mask = FLOW_ACTION_MASK(
363 RTE_FLOW_ACTION_TYPE_SET_TP_DST
365 .map = &flow_actions[0],
366 .map_idx = &actions_idx
369 .str = "inc-tcp-ack",
370 .mask = FLOW_ACTION_MASK(
371 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
373 .map = &flow_actions[0],
374 .map_idx = &actions_idx
377 .str = "dec-tcp-ack",
378 .mask = FLOW_ACTION_MASK(
379 RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
381 .map = &flow_actions[0],
382 .map_idx = &actions_idx
385 .str = "inc-tcp-seq",
386 .mask = FLOW_ACTION_MASK(
387 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
389 .map = &flow_actions[0],
390 .map_idx = &actions_idx
393 .str = "dec-tcp-seq",
394 .mask = FLOW_ACTION_MASK(
395 RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
397 .map = &flow_actions[0],
398 .map_idx = &actions_idx
402 .mask = FLOW_ACTION_MASK(
403 RTE_FLOW_ACTION_TYPE_SET_TTL
405 .map = &flow_actions[0],
406 .map_idx = &actions_idx
410 .mask = FLOW_ACTION_MASK(
411 RTE_FLOW_ACTION_TYPE_DEC_TTL
413 .map = &flow_actions[0],
414 .map_idx = &actions_idx
417 .str = "set-ipv4-dscp",
418 .mask = FLOW_ACTION_MASK(
419 RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
421 .map = &flow_actions[0],
422 .map_idx = &actions_idx
425 .str = "set-ipv6-dscp",
426 .mask = FLOW_ACTION_MASK(
427 RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
429 .map = &flow_actions[0],
430 .map_idx = &actions_idx
434 .mask = FLOW_ACTION_MASK(
435 RTE_FLOW_ACTION_TYPE_FLAG
437 .map = &flow_actions[0],
438 .map_idx = &actions_idx
442 .mask = FLOW_ACTION_MASK(
443 RTE_FLOW_ACTION_TYPE_METER
445 .map = &flow_actions[0],
446 .map_idx = &actions_idx
449 .str = "vxlan-encap",
450 .mask = FLOW_ACTION_MASK(
451 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
453 .map = &flow_actions[0],
454 .map_idx = &actions_idx
457 .str = "vxlan-decap",
458 .mask = FLOW_ACTION_MASK(
459 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
461 .map = &flow_actions[0],
462 .map_idx = &actions_idx
/*
 * Print the full command-line help text to stdout.
 * @progname: argv[0], echoed in the usage banner.
 * NOTE(review): the opening brace and a few continuation lines are elided in
 * this listing.  The "--set-dst-ipv4" help string below is missing its ':'
 * separator (compare the sibling options) — fix when the full body is in view.
 */
467 usage(char *progname)
469 printf("\nusage: %s\n", progname);
470 printf("\nControl configurations:\n");
471 printf(" --rules-count=N: to set the number of needed"
472 " rules to insert, default is %d\n", DEFAULT_RULES_COUNT);
473 printf(" --rules-batch=N: set number of batched rules,"
474 " default is %d\n", DEFAULT_RULES_BATCH);
475 printf(" --dump-iterations: To print rates for each"
477 printf(" --deletion-rate: Enable deletion rate"
479 printf(" --dump-socket-mem: To dump all socket memory\n");
480 printf(" --enable-fwd: To enable packets forwarding"
481 " after insertion\n");
482 printf(" --portmask=N: hexadecimal bitmask of ports used\n");
483 printf(" --random-priority=N,S: use random priority levels "
484 "from 0 to (N - 1) for flows "
485 "and S as seed for pseudo-random number generator\n");
486 printf(" --unique-data: flag to set using unique data for all"
487 " actions that support data, such as header modify and encap actions\n");
488 printf(" --meter-profile=cir,cbs,ebs: set CIR CBS EBS parameters in meter"
489 " profile, default values are %d,%d,%d\n", METER_CIR,
492 printf("To set flow attributes:\n");
493 printf(" --ingress: set ingress attribute in flows\n");
494 printf(" --egress: set egress attribute in flows\n");
495 printf(" --transfer: set transfer attribute in flows\n");
496 printf(" --group=N: set group for all flows,"
497 " default is %d\n", DEFAULT_GROUP);
498 printf(" --cores=N: to set the number of needed "
499 "cores to insert rte_flow rules, default is 1\n");
500 printf(" --rxq=N: to set the count of receive queues\n");
501 printf(" --txq=N: to set the count of send queues\n");
502 printf(" --rxd=N: to set the count of rxd\n");
503 printf(" --txd=N: to set the count of txd\n");
504 printf(" --mbuf-size=N: to set the size of mbuf\n");
505 printf(" --mbuf-cache-size=N: to set the size of mbuf cache\n");
506 printf(" --total-mbuf-count=N: to set the count of total mbuf count\n");
509 printf("To set flow items:\n");
510 printf(" --ether: add ether layer in flow items\n");
511 printf(" --vlan: add vlan layer in flow items\n");
512 printf(" --ipv4: add ipv4 layer in flow items\n");
513 printf(" --ipv6: add ipv6 layer in flow items\n");
514 printf(" --tcp: add tcp layer in flow items\n");
515 printf(" --udp: add udp layer in flow items\n");
516 printf(" --vxlan: add vxlan layer in flow items\n");
517 printf(" --vxlan-gpe: add vxlan-gpe layer in flow items\n");
518 printf(" --gre: add gre layer in flow items\n");
519 printf(" --geneve: add geneve layer in flow items\n");
520 printf(" --gtp: add gtp layer in flow items\n");
521 printf(" --meta: add meta layer in flow items\n");
522 printf(" --tag: add tag layer in flow items\n");
523 printf(" --icmpv4: add icmpv4 layer in flow items\n");
524 printf(" --icmpv6: add icmpv6 layer in flow items\n");
526 printf("To set flow actions:\n");
527 printf(" --port-id: add port-id action in flow actions\n");
528 printf(" --rss: add rss action in flow actions\n");
529 printf(" --queue: add queue action in flow actions\n");
530 printf(" --jump: add jump action in flow actions\n");
531 printf(" --mark: add mark action in flow actions\n");
532 printf(" --count: add count action in flow actions\n");
533 printf(" --set-meta: add set meta action in flow actions\n");
534 printf(" --set-tag: add set tag action in flow actions\n");
535 printf(" --drop: add drop action in flow actions\n");
536 printf(" --hairpin-queue=N: add hairpin-queue action in flow actions\n");
537 printf(" --hairpin-rss=N: add hairpin-rss action in flow actions\n");
538 printf(" --set-src-mac: add set src mac action to flow actions\n"
539 "Src mac to be set is random each flow\n");
540 printf(" --set-dst-mac: add set dst mac action to flow actions\n"
541 "Dst mac to be set is random each flow\n");
542 printf(" --set-src-ipv4: add set src ipv4 action to flow actions\n"
543 "Src ipv4 to be set is random each flow\n");
544 printf(" --set-dst-ipv4 add set dst ipv4 action to flow actions\n"
545 "Dst ipv4 to be set is random each flow\n");
546 printf(" --set-src-ipv6: add set src ipv6 action to flow actions\n"
547 "Src ipv6 to be set is random each flow\n");
548 printf(" --set-dst-ipv6: add set dst ipv6 action to flow actions\n"
549 "Dst ipv6 to be set is random each flow\n");
550 printf(" --set-src-tp: add set src tp action to flow actions\n"
551 "Src tp to be set is random each flow\n");
552 printf(" --set-dst-tp: add set dst tp action to flow actions\n"
553 "Dst tp to be set is random each flow\n");
554 printf(" --inc-tcp-ack: add inc tcp ack action to flow actions\n"
555 "tcp ack will be increments by 1\n");
556 printf(" --dec-tcp-ack: add dec tcp ack action to flow actions\n"
557 "tcp ack will be decrements by 1\n");
558 printf(" --inc-tcp-seq: add inc tcp seq action to flow actions\n"
559 "tcp seq will be increments by 1\n");
560 printf(" --dec-tcp-seq: add dec tcp seq action to flow actions\n"
561 "tcp seq will be decrements by 1\n");
562 printf(" --set-ttl: add set ttl action to flow actions\n"
563 "L3 ttl to be set is random each flow\n");
564 printf(" --dec-ttl: add dec ttl action to flow actions\n"
565 "L3 ttl will be decrements by 1\n");
566 printf(" --set-ipv4-dscp: add set ipv4 dscp action to flow actions\n"
567 "ipv4 dscp value to be set is random each flow\n");
568 printf(" --set-ipv6-dscp: add set ipv6 dscp action to flow actions\n"
569 "ipv6 dscp value to be set is random each flow\n");
570 printf(" --flag: add flag action to flow actions\n");
571 printf(" --meter: add meter action to flow actions\n");
572 printf(" --policy-mtr=\"g1,g2:y1:r1\": to create meter with specified "
573 "colored actions\n");
574 printf(" --raw-encap=<data>: add raw encap action to flow actions\n"
575 "Data is the data needed to be encaped\n"
576 "Example: raw-encap=ether,ipv4,udp,vxlan\n");
577 printf(" --raw-decap=<data>: add raw decap action to flow actions\n"
578 "Data is the data needed to be decaped\n"
579 "Example: raw-decap=ether,ipv4,udp,vxlan\n");
580 printf(" --vxlan-encap: add vxlan-encap action to flow actions\n"
581 "Encapped data is fixed with pattern: ether,ipv4,udp,vxlan\n"
582 "With fixed values\n");
583 printf(" --vxlan-decap: add vxlan_decap action to flow actions\n");
/*
 * Parse the --policy-mtr argument "g1,g2:y1:r1": first split the string on
 * ':' into one action list per meter color, then split each color's list on
 * ',' and translate every token into its flow_options mask, accumulated in
 * all_actions[color][].
 * @prog: argv[0], presumably used for a usage message in the elided error
 *        path — confirm when the full body is in view.
 * @arg:  the option argument; consumed destructively by strsep/strtok.
 * NOTE(review): the error path below exits with EXIT_SUCCESS despite being a
 * failure — confirm this is intentional (exit-after-usage) or change to
 * EXIT_FAILURE.
 */
587 read_meter_policy(char *prog, char *arg)
/* Split per-color sections on ':'. */
595 token = strsep(&arg, ":\0");
596 while (token != NULL && j < RTE_COLORS) {
597 actions_str[j++] = token;
598 token = strsep(&arg, ":\0");
/* Walk each color's comma-separated action names. */
601 token = strtok(actions_str[0], ",\0");
602 while (token == NULL && j < RTE_COLORS - 1)
603 token = strtok(actions_str[++j], ",\0");
604 while (j < RTE_COLORS && token != NULL) {
605 for (i = 0; i < RTE_DIM(flow_options); i++) {
606 if (!strcmp(token, flow_options[i].str)) {
607 all_actions[j][k++] = flow_options[i].mask;
611 /* Reached last action with no match */
612 if (i >= RTE_DIM(flow_options)) {
613 fprintf(stderr, "Invalid colored actions: %s\n", token);
615 rte_exit(EXIT_SUCCESS, "Invalid colored actions\n");
/* Advance to next token, moving to the next color's list when exhausted. */
617 token = strtok(NULL, ",\0");
618 while (!token && j < RTE_COLORS - 1) {
619 token = strtok(actions_str[++j], ",\0");
/*
 * Parse all long command-line options and populate the file-scope
 * configuration state (items/actions/attrs masks, queue counts, port mask,
 * meter settings, ...).  Exits via rte_exit() on any invalid input.
 * NOTE(review): many branch bodies, closing braces and helper lines are
 * elided in this numbered listing.
 */
626 args_parse(int argc, char **argv)
/* Long-option table for getopt_long(); third field 1 = takes an argument. */
637 static const struct option lgopts[] = {
640 { "rules-count", 1, 0, 0 },
641 { "rules-batch", 1, 0, 0 },
642 { "dump-iterations", 0, 0, 0 },
643 { "deletion-rate", 0, 0, 0 },
644 { "dump-socket-mem", 0, 0, 0 },
645 { "enable-fwd", 0, 0, 0 },
646 { "unique-data", 0, 0, 0 },
647 { "portmask", 1, 0, 0 },
648 { "cores", 1, 0, 0 },
649 { "random-priority", 1, 0, 0 },
650 { "meter-profile-alg", 1, 0, 0 },
655 { "mbuf-size", 1, 0, 0 },
656 { "mbuf-cache-size", 1, 0, 0 },
657 { "total-mbuf-count", 1, 0, 0 },
659 { "ingress", 0, 0, 0 },
660 { "egress", 0, 0, 0 },
661 { "transfer", 0, 0, 0 },
662 { "group", 1, 0, 0 },
664 { "ether", 0, 0, 0 },
670 { "vxlan", 0, 0, 0 },
671 { "vxlan-gpe", 0, 0, 0 },
673 { "geneve", 0, 0, 0 },
677 { "icmpv4", 0, 0, 0 },
678 { "icmpv6", 0, 0, 0 },
680 { "port-id", 2, 0, 0 },
682 { "queue", 0, 0, 0 },
685 { "count", 0, 0, 0 },
686 { "set-meta", 0, 0, 0 },
687 { "set-tag", 0, 0, 0 },
689 { "hairpin-queue", 1, 0, 0 },
690 { "hairpin-rss", 1, 0, 0 },
691 { "set-src-mac", 0, 0, 0 },
692 { "set-dst-mac", 0, 0, 0 },
693 { "set-src-ipv4", 0, 0, 0 },
694 { "set-dst-ipv4", 0, 0, 0 },
695 { "set-src-ipv6", 0, 0, 0 },
696 { "set-dst-ipv6", 0, 0, 0 },
697 { "set-src-tp", 0, 0, 0 },
698 { "set-dst-tp", 0, 0, 0 },
699 { "inc-tcp-ack", 0, 0, 0 },
700 { "dec-tcp-ack", 0, 0, 0 },
701 { "inc-tcp-seq", 0, 0, 0 },
702 { "dec-tcp-seq", 0, 0, 0 },
703 { "set-ttl", 0, 0, 0 },
704 { "dec-ttl", 0, 0, 0 },
705 { "set-ipv4-dscp", 0, 0, 0 },
706 { "set-ipv6-dscp", 0, 0, 0 },
708 { "meter", 0, 0, 0 },
709 { "raw-encap", 1, 0, 0 },
710 { "raw-decap", 1, 0, 0 },
711 { "vxlan-encap", 0, 0, 0 },
712 { "vxlan-decap", 0, 0, 0 },
713 { "policy-mtr", 1, 0, 0 },
714 { "meter-profile", 1, 0, 0 },
/* Default port mask: every detected ethdev port. */
717 RTE_ETH_FOREACH_DEV(i)
718 ports_mask |= 1 << i;
720 for (i = 0; i < RTE_MAX_ETHPORTS; i++)
721 dst_ports[i] = PORT_ID_DST;
723 hairpin_queues_num = 0;
726 printf(":: Flow -> ");
727 while ((opt = getopt_long(argc, argvopt, "",
728 lgopts, &opt_idx)) != EOF) {
731 if (strcmp(lgopts[opt_idx].name, "help") == 0) {
736 if (strcmp(lgopts[opt_idx].name, "group") == 0) {
741 rte_exit(EXIT_FAILURE,
742 "flow group should be >= 0\n");
743 printf("group %d / ", flow_group);
/* Generic options: look the name up in flow_options and set its mask. */
746 for (i = 0; i < RTE_DIM(flow_options); i++)
747 if (strcmp(lgopts[opt_idx].name,
748 flow_options[i].str) == 0) {
750 (*flow_options[i].map_idx)++] =
751 flow_options[i].mask;
752 printf("%s / ", flow_options[i].str);
755 if (strcmp(lgopts[opt_idx].name,
756 "hairpin-rss") == 0) {
759 hairpin_queues_num = n;
761 rte_exit(EXIT_FAILURE,
762 "Hairpin queues should be > 0\n");
764 flow_actions[actions_idx++] =
766 printf("hairpin-rss / ");
768 if (strcmp(lgopts[opt_idx].name,
769 "hairpin-queue") == 0) {
772 hairpin_queues_num = n;
774 rte_exit(EXIT_FAILURE,
775 "Hairpin queues should be > 0\n");
777 flow_actions[actions_idx++] =
778 HAIRPIN_QUEUE_ACTION;
779 printf("hairpin-queue / ");
/* raw-encap takes a comma list of item names accumulated into encap_data. */
782 if (strcmp(lgopts[opt_idx].name, "raw-encap") == 0) {
783 printf("raw-encap ");
784 flow_actions[actions_idx++] =
786 RTE_FLOW_ACTION_TYPE_RAW_ENCAP
789 token = strtok(optarg, ",");
790 while (token != NULL) {
791 for (i = 0; i < RTE_DIM(flow_options); i++) {
792 if (strcmp(flow_options[i].str, token) == 0) {
793 printf("%s,", token);
794 encap_data |= flow_options[i].mask;
797 /* Reached last item with no match */
798 if (i == (RTE_DIM(flow_options) - 1))
799 rte_exit(EXIT_FAILURE,
800 "Invalid encap item: %s\n", token);
802 token = strtok(NULL, ",");
/* raw-decap mirrors raw-encap but accumulates into decap_data. */
806 if (strcmp(lgopts[opt_idx].name, "raw-decap") == 0) {
807 printf("raw-decap ");
808 flow_actions[actions_idx++] =
810 RTE_FLOW_ACTION_TYPE_RAW_DECAP
813 token = strtok(optarg, ",");
814 while (token != NULL) {
815 for (i = 0; i < RTE_DIM(flow_options); i++) {
816 if (strcmp(flow_options[i].str, token) == 0) {
817 printf("%s,", token);
818 decap_data |= flow_options[i].mask;
821 /* Reached last item with no match */
822 if (i == (RTE_DIM(flow_options) - 1))
823 rte_exit(EXIT_FAILURE,
824 "Invalid decap item %s\n", token);
826 token = strtok(NULL, ",");
/* Control configuration options. */
831 if (strcmp(lgopts[opt_idx].name,
832 "rules-batch") == 0) {
833 rules_batch = atoi(optarg);
835 if (strcmp(lgopts[opt_idx].name,
836 "rules-count") == 0) {
837 rules_count = atoi(optarg);
839 if (strcmp(lgopts[opt_idx].name, "random-priority") ==
842 prio = strtol(optarg, &end, 10);
843 if ((optarg[0] == '\0') || (end == NULL))
844 rte_exit(EXIT_FAILURE,
845 "Invalid value for random-priority\n");
848 seed = strtoll(token, &end, 10);
849 if ((token[0] == '\0') || (*end != '\0'))
850 rte_exit(EXIT_FAILURE,
851 "Invalid value for random-priority\n");
854 if (strcmp(lgopts[opt_idx].name,
855 "dump-iterations") == 0)
856 dump_iterations = true;
857 if (strcmp(lgopts[opt_idx].name,
860 if (strcmp(lgopts[opt_idx].name,
861 "deletion-rate") == 0)
863 if (strcmp(lgopts[opt_idx].name,
864 "dump-socket-mem") == 0)
865 dump_socket_mem_flag = true;
866 if (strcmp(lgopts[opt_idx].name,
869 if (strcmp(lgopts[opt_idx].name,
871 /* parse hexadecimal string */
873 pm = strtoull(optarg, &end, 16);
874 if ((optarg[0] == '\0') || (end == NULL) || (*end != '\0'))
875 rte_exit(EXIT_FAILURE, "Invalid fwd port mask\n");
878 if (strcmp(lgopts[opt_idx].name,
880 uint16_t port_idx = 0;
883 token = strtok(optarg, ",");
884 while (token != NULL) {
885 dst_ports[port_idx++] = atoi(token);
886 token = strtok(NULL, ",");
889 if (strcmp(lgopts[opt_idx].name, "rxq") == 0) {
891 rx_queues_count = (uint8_t) n;
893 if (strcmp(lgopts[opt_idx].name, "txq") == 0) {
895 tx_queues_count = (uint8_t) n;
897 if (strcmp(lgopts[opt_idx].name, "rxd") == 0) {
899 rxd_count = (uint8_t) n;
901 if (strcmp(lgopts[opt_idx].name, "txd") == 0) {
903 txd_count = (uint8_t) n;
905 if (strcmp(lgopts[opt_idx].name, "mbuf-size") == 0) {
907 mbuf_size = (uint32_t) n;
909 if (strcmp(lgopts[opt_idx].name, "mbuf-cache-size") == 0) {
911 mbuf_cache_size = (uint32_t) n;
913 if (strcmp(lgopts[opt_idx].name, "total-mbuf-count") == 0) {
915 total_mbuf_num = (uint32_t) n;
917 if (strcmp(lgopts[opt_idx].name, "cores") == 0) {
919 if ((int) rte_lcore_count() <= n) {
920 rte_exit(EXIT_FAILURE,
921 "Error: you need %d cores to run on multi-cores\n"
922 "Existing cores are: %d\n", n, rte_lcore_count());
924 if (n <= RTE_MAX_LCORE && n > 0)
925 mc_pool.cores_count = n;
927 rte_exit(EXIT_FAILURE,
928 "Error: cores count must be > 0 and < %d\n",
932 if (strcmp(lgopts[opt_idx].name, "policy-mtr") == 0)
933 read_meter_policy(argv[0], optarg);
/* meter-profile takes up to 3 comma-separated values: CIR,CBS,EBS. */
934 if (strcmp(lgopts[opt_idx].name,
935 "meter-profile") == 0) {
937 token = strsep(&optarg, ",\0");
938 while (token != NULL && i < sizeof(
939 meter_profile_values) /
941 meter_profile_values[i++] = atol(token);
942 token = strsep(&optarg, ",\0");
948 rte_exit(EXIT_FAILURE, "Invalid option: %s\n",
/* Post-parse sanity checks: batches must tile rules_count exactly and the
 * batch count must fit the fixed-size per-batch timing arrays. */
953 if (rules_count % rules_batch != 0) {
954 rte_exit(EXIT_FAILURE,
955 "rules_count %% rules_batch should be 0\n");
957 if (rules_count / rules_batch > MAX_BATCHES_COUNT) {
958 rte_exit(EXIT_FAILURE,
959 "rules_count / rules_batch should be <= %d\n",
963 printf("end_flow\n");
966 /* Dump the socket memory statistics on console */
/*
 * Walk every NUMA node's malloc heap, accumulate totals, and (when
 * --dump-socket-mem is set) print a per-socket and grand-total breakdown
 * to @f.  Nodes with no heap are skipped.
 */
968 dump_socket_mem(FILE *f)
970 struct rte_malloc_socket_stats socket_stats;
975 unsigned int n_alloc = 0;
976 unsigned int n_free = 0;
977 bool active_nodes = false;
980 for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
/* Skip nodes whose stats are unavailable or whose heap is empty. */
981 if (rte_malloc_get_socket_stats(i, &socket_stats) ||
982 !socket_stats.heap_totalsz_bytes)
985 total += socket_stats.heap_totalsz_bytes;
986 alloc += socket_stats.heap_allocsz_bytes;
987 free += socket_stats.heap_freesz_bytes;
988 n_alloc += socket_stats.alloc_count;
989 n_free += socket_stats.free_count;
990 if (dump_socket_mem_flag) {
991 fprintf(f, "::::::::::::::::::::::::::::::::::::::::");
993 "\nSocket %u:\nsize(M) total: %.6lf\nalloc:"
994 " %.6lf(%.3lf%%)\nfree: %.6lf"
996 "\ncount alloc: %u\nfree: %u\n",
998 socket_stats.heap_totalsz_bytes / 1.0e6,
999 socket_stats.heap_allocsz_bytes / 1.0e6,
1000 (double)socket_stats.heap_allocsz_bytes * 100 /
1001 (double)socket_stats.heap_totalsz_bytes,
1002 socket_stats.heap_freesz_bytes / 1.0e6,
1003 socket_stats.greatest_free_size / 1.0e6,
1004 socket_stats.alloc_count,
1005 socket_stats.free_count);
1006 fprintf(f, "::::::::::::::::::::::::::::::::::::::::");
/* Grand total across all active sockets. */
1009 if (dump_socket_mem_flag && active_nodes) {
1011 "\nTotal: size(M)\ntotal: %.6lf"
1012 "\nalloc: %.6lf(%.3lf%%)\nfree: %.6lf"
1013 "\ncount alloc: %u\nfree: %u\n",
1014 total / 1.0e6, alloc / 1.0e6,
1015 (double)alloc * 100 / (double)total, free / 1.0e6,
1017 fprintf(f, "::::::::::::::::::::::::::::::::::::::::\n");
/* Print a human-readable rte_flow error (type + message) to stdout. */
1023 print_flow_error(struct rte_flow_error error)
1025 printf("Flow can't be created %d message: %s\n",
1027 error.message ? error.message : "(no stated reason)");
/*
 * Print the per-batch insertion/deletion rate table.
 * @cpu_time_per_batch: seconds spent on each batch, MAX_BATCHES_COUNT
 *                      entries; zero entries are skipped (unused slots).
 */
1031 print_rules_batches(double *cpu_time_per_batch)
1037 for (idx = 0; idx < MAX_BATCHES_COUNT; idx++) {
1038 if (!cpu_time_per_batch[idx])
1040 delta = (double)(rules_batch / cpu_time_per_batch[idx]);
1041 rate = delta / 1000; /* Save rate in K unit. */
1042 printf(":: Rules batch #%d: %d rules "
1043 "in %f sec[ Rate = %f K Rule/Sec ]\n",
1045 cpu_time_per_batch[idx], rate);
/* NOTE(review): interior fragment of a helper (signature elided in this
 * listing) that scans flow_actions[] for the METER action mask; a zero
 * entry terminates the populated prefix of the array. */
1054 for (i = 0; i < MAX_ACTIONS_NUM; i++) {
1055 if (flow_actions[i] == 0)
1058 & FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_METER))
/*
 * Create one meter policy per available port from the per-color action
 * masks parsed by read_meter_policy().  Policy ids are port_id + 10; on
 * failure the slot is marked UINT32_MAX so later deletion skips it.
 */
1065 create_meter_policy(void)
1067 struct rte_mtr_error error;
1069 struct rte_mtr_meter_policy_params policy;
1071 struct rte_flow_action actions[RTE_COLORS][MAX_ACTIONS_NUM];
1074 memset(actions, 0, sizeof(actions));
1075 memset(&policy, 0, sizeof(policy));
1076 nr_ports = rte_eth_dev_count_avail();
1077 for (port_id = 0; port_id < nr_ports; port_id++) {
/* Materialize each color's action mask into concrete rte_flow actions. */
1078 for (i = 0; i < RTE_COLORS; i++)
1079 fill_actions(actions[i], all_actions[i], 0, 0, 0,
1080 0, 0, 0, unique_data, rx_queues_count,
1081 dst_ports[port_id]);
1082 policy.actions[RTE_COLOR_GREEN] = actions[RTE_COLOR_GREEN];
1083 policy.actions[RTE_COLOR_YELLOW] = actions[RTE_COLOR_YELLOW];
1084 policy.actions[RTE_COLOR_RED] = actions[RTE_COLOR_RED];
1085 policy_id[port_id] = port_id + 10;
1086 ret = rte_mtr_meter_policy_add(port_id, policy_id[port_id],
1089 fprintf(stderr, "port %d: failed to create meter policy\n",
1091 policy_id[port_id] = UINT32_MAX;
/* Reset the scratch actions for the next port. */
1093 memset(actions, 0, sizeof(actions));
/*
 * Delete the meter policy previously created on every port selected by
 * ports_mask; exits the application on the first failure.
 */
1098 destroy_meter_policy(void)
1100 struct rte_mtr_error error;
1104 nr_ports = rte_eth_dev_count_avail();
1105 for (port_id = 0; port_id < nr_ports; port_id++) {
1106 /* If port outside portmask */
1107 if (!((ports_mask >> port_id) & 0x1))
1110 if (rte_mtr_meter_policy_delete
1111 (port_id, policy_id[port_id], &error)) {
1112 fprintf(stderr, "port %u: failed to delete meter policy\n",
1114 rte_exit(EXIT_FAILURE, "Error: Failed to delete meter policy.\n");
/*
 * Create a single meter object with id @counter on @port_id, either with
 * the default profile (shared == 1) or, for --policy-mtr, bound to the
 * port's policy (shared == 0).  Exits the application on failure.
 * NOTE(review): the occurrences of "¶ms" below are mojibake — "&params"
 * had its "&para" prefix converted to the '¶' character by a broken HTML
 * entity pass.  Restore "&params" when re-encoding this listing.
 */
1120 create_meter_rule(int port_id, uint32_t counter)
1123 struct rte_mtr_params params;
1124 struct rte_mtr_error error;
1126 memset(¶ms, 0, sizeof(struct rte_mtr_params));
1127 params.meter_enable = 1;
1128 params.stats_mask = 0xffff;
1129 params.use_prev_mtr_color = 0;
1130 params.dscp_table = NULL;
1133 params.meter_profile_id = DEFAULT_METER_PROF_ID;
1136 ret = rte_mtr_create(port_id, counter, ¶ms, 1, &error);
1138 params.meter_policy_id = policy_id[port_id];
1139 ret = rte_mtr_create(port_id, counter, ¶ms, 0, &error);
1143 printf("Port %u create meter idx(%d) error(%d) message: %s\n",
1144 port_id, counter, error.type,
1145 error.message ? error.message : "(no stated reason)");
1146 rte_exit(EXIT_FAILURE, "Error in creating meter\n");
/*
 * Destroy meter object @counter on @port_id; under --policy-mtr also delete
 * the port's policy first (once — the slot is reset to UINT32_MAX).
 * Exits the application if the meter itself cannot be destroyed.
 */
1151 destroy_meter_rule(int port_id, uint32_t counter)
1153 struct rte_mtr_error error;
1155 if (policy_mtr && policy_id[port_id] != UINT32_MAX) {
1156 if (rte_mtr_meter_policy_delete(port_id, policy_id[port_id],
1158 fprintf(stderr, "Error: Failed to delete meter policy\n");
1159 policy_id[port_id] = UINT32_MAX;
1161 if (rte_mtr_destroy(port_id, counter, &error)) {
1162 fprintf(stderr, "Port %d: Failed to delete meter.\n",
1164 rte_exit(EXIT_FAILURE, "Error in deleting meter rule");
/*
 * Create or delete this core's share of meter rules on @port_id, timing
 * each batch of rules_batch operations and recording the aggregate rate
 * into mc_pool.meters_record.
 * @ops: METER_CREATE or METER_DELETE.
 * NOTE(review): cpu_time_used is accumulated with '+=' below but no
 * initialization to 0 is visible in this listing — confirm it is zeroed in
 * an elided line, otherwise this reads an indeterminate value (UB).
 */
1169 meters_handler(int port_id, uint8_t core_id, uint8_t ops)
1171 uint64_t start_batch;
1172 double cpu_time_used, insertion_rate;
1173 int rules_count_per_core, rules_batch_idx;
1174 uint32_t counter, start_counter = 0, end_counter;
1175 double cpu_time_per_batch[MAX_BATCHES_COUNT] = { 0 };
1177 rules_count_per_core = rules_count / mc_pool.cores_count;
/* Each core owns a contiguous range of meter ids. */
1180 start_counter = core_id * rules_count_per_core;
1181 end_counter = (core_id + 1) * rules_count_per_core;
1184 start_batch = rte_get_timer_cycles();
1185 for (counter = start_counter; counter < end_counter; counter++) {
1186 if (ops == METER_CREATE)
1187 create_meter_rule(port_id, counter);
1189 destroy_meter_rule(port_id, counter);
1191 * Save the insertion rate for rules batch.
1192 * Check if the insertion reached the rules
1193 * batch counter, then save the insertion rate
1196 if (!((counter + 1) % rules_batch)) {
1197 rules_batch_idx = ((counter + 1) / rules_batch) - 1;
1198 cpu_time_per_batch[rules_batch_idx] =
1199 ((double)(rte_get_timer_cycles() - start_batch))
1200 / rte_get_timer_hz();
1201 cpu_time_used += cpu_time_per_batch[rules_batch_idx];
1202 start_batch = rte_get_timer_cycles();
1206 /* Print insertion rates for all batches */
1207 if (dump_iterations)
1208 print_rules_batches(cpu_time_per_batch);
1211 ((double) (rules_count_per_core / cpu_time_used) / 1000);
1213 /* Insertion rate for all rules in one core */
1214 printf(":: Port %d :: Core %d Meter %s :: start @[%d] - end @[%d],"
1215 " use:%.02fs, rate:%.02fk Rule/Sec\n",
1216 port_id, core_id, ops == METER_CREATE ? "create" : "delete",
1217 start_counter, end_counter - 1,
1218 cpu_time_used, insertion_rate);
/* Record the elapsed time for the final cross-core report. */
1220 if (ops == METER_CREATE)
1221 mc_pool.meters_record.insertion[port_id][core_id]
1224 mc_pool.meters_record.deletion[port_id][core_id]
/*
 * Delete the default meter profile on every port selected by ports_mask;
 * exits the application on the first failure.
 */
1229 destroy_meter_profile(void)
1231 struct rte_mtr_error error;
1235 nr_ports = rte_eth_dev_count_avail();
1236 for (port_id = 0; port_id < nr_ports; port_id++) {
1237 /* If port outside portmask */
1238 if (!((ports_mask >> port_id) & 0x1))
1241 if (rte_mtr_meter_profile_delete
1242 (port_id, DEFAULT_METER_PROF_ID, &error)) {
1243 printf("Port %u del profile error(%d) message: %s\n",
1244 port_id, error.type,
1245 error.message ? error.message : "(no stated reason)");
1246 rte_exit(EXIT_FAILURE, "Error: Destroy meter profile Failed!\n");
/*
 * Add a srTCM (RFC 2697) meter profile on every port selected by
 * ports_mask, using the CIR/CBS/EBS values from --meter-profile, falling
 * back to METER_CIR (and METER_CIR/8 for CBS) when unset.
 * Exits the application on the first failure.
 */
1252 create_meter_profile(void)
1256 struct rte_mtr_meter_profile mp;
1257 struct rte_mtr_error error;
1260 * currently, only one meter profile is created per port
1261 *1 meter profile -> N meter rules -> N rte flows
1263 memset(&mp, 0, sizeof(struct rte_mtr_meter_profile));
1264 nr_ports = rte_eth_dev_count_avail();
1265 for (port_id = 0; port_id < nr_ports; port_id++) {
1266 /* If port outside portmask */
1267 if (!((ports_mask >> port_id) & 0x1))
1269 mp.alg = RTE_MTR_SRTCM_RFC2697;
/* A zero value means "not set on the command line": use the default. */
1270 mp.srtcm_rfc2697.cir = meter_profile_values[0] ?
1271 meter_profile_values[0] : METER_CIR;
1272 mp.srtcm_rfc2697.cbs = meter_profile_values[1] ?
1273 meter_profile_values[1] : METER_CIR / 8;
1274 mp.srtcm_rfc2697.ebs = meter_profile_values[2];
1275 ret = rte_mtr_meter_profile_add
1276 (port_id, DEFAULT_METER_PROF_ID, &mp, &error);
1278 printf("Port %u create Profile error(%d) message: %s\n",
1279 port_id, error.type,
1280 error.message ? error.message : "(no stated reason)");
1281 rte_exit(EXIT_FAILURE, "Error: Creation meter profile Failed!\n");
/*
 * Destroy this core's share of inserted flows on @port_id, timing each
 * batch of rules_batch deletions, then print and record the aggregate
 * deletion rate into mc_pool.flows_record.
 * @flows_list: per-core array of flow handles; NULL entries are skipped.
 * NOTE(review): if no batch completes, cpu_time_used stays 0 and the
 * deletion_rate division below divides by zero — confirm callers guarantee
 * at least one full batch per core.
 */
1287 destroy_flows(int port_id, uint8_t core_id, struct rte_flow **flows_list)
1289 struct rte_flow_error error;
1290 clock_t start_batch, end_batch;
1291 double cpu_time_used = 0;
1292 double deletion_rate;
1293 double cpu_time_per_batch[MAX_BATCHES_COUNT] = { 0 };
1296 int rules_batch_idx;
1297 int rules_count_per_core;
1299 rules_count_per_core = rules_count / mc_pool.cores_count;
1300 /* If group > 0, core 0 owns one extra flow that was created in group 0 */
1301 if (flow_group > 0 && core_id == 0)
1302 rules_count_per_core++;
1304 start_batch = rte_get_timer_cycles();
1305 for (i = 0; i < (uint32_t) rules_count_per_core; i++) {
1306 if (flows_list[i] == 0)
/* Poison the error struct so stale fields are detectable. */
1309 memset(&error, 0x33, sizeof(error));
1310 if (rte_flow_destroy(port_id, flows_list[i], &error)) {
1311 print_flow_error(error);
1312 rte_exit(EXIT_FAILURE, "Error in deleting flow\n");
1316 * Save the deletion rate for rules batch.
1317 * Check if the deletion reached the rules
1318 * batch counter, then save the deletion rate
1321 if (!((i + 1) % rules_batch)) {
1322 end_batch = rte_get_timer_cycles();
1323 delta = (double) (end_batch - start_batch);
1324 rules_batch_idx = ((i + 1) / rules_batch) - 1;
1325 cpu_time_per_batch[rules_batch_idx] = delta / rte_get_timer_hz();
1326 cpu_time_used += cpu_time_per_batch[rules_batch_idx];
1327 start_batch = rte_get_timer_cycles();
1331 /* Print deletion rates for all batches */
1332 if (dump_iterations)
1333 print_rules_batches(cpu_time_per_batch);
1335 /* Deletion rate for all rules */
1336 deletion_rate = ((double) (rules_count_per_core / cpu_time_used) / 1000);
1337 printf(":: Port %d :: Core %d :: Rules deletion rate -> %f K Rule/Sec\n",
1338 port_id, core_id, deletion_rate);
1339 printf(":: Port %d :: Core %d :: The time for deleting %d rules is %f seconds\n",
1340 port_id, core_id, rules_count_per_core, cpu_time_used);
1342 mc_pool.flows_record.deletion[port_id][core_id] = cpu_time_used;
/*
 * Insert this core's share of flow rules on port_id and return the array
 * of created rte_flow handles (rte_zmalloc'd; ownership passes to the
 * caller, which later hands it to destroy_flows()). Records per-core
 * insertion time into mc_pool.flows_record.insertion[port_id][core_id].
 *
 * NOTE(review): damaged extraction -- error-check guards ("if (!flow)"),
 * "continue;"/"break;" lines, closing braces and some declarations
 * (delta, counter-latency guard) are missing; confirm against upstream
 * DPDK app/test-flow-perf/main.c before editing.
 */
1345 static struct rte_flow **
1346 insert_flows(int port_id, uint8_t core_id, uint16_t dst_port_id)
1348 struct rte_flow **flows_list;
1349 struct rte_flow_error error;
1350 clock_t start_batch, end_batch;
1351 double first_flow_latency;
1352 double cpu_time_used;
1353 double insertion_rate;
1354 double cpu_time_per_batch[MAX_BATCHES_COUNT] = { 0 };
1356 uint32_t flow_index;
1357 uint32_t counter, start_counter = 0, end_counter;
1358 uint64_t global_items[MAX_ITEMS_NUM] = { 0 };
1359 uint64_t global_actions[MAX_ACTIONS_NUM] = { 0 };
1360 int rules_batch_idx;
1361 int rules_count_per_core;
1363 rules_count_per_core = rules_count / mc_pool.cores_count;
1365 /* Set boundaries of rules for each core. */
1367 start_counter = core_id * rules_count_per_core;
1368 end_counter = (core_id + 1) * rules_count_per_core;
/* The one-off group-0 jump rule matches all ETH and jumps to flow_group. */
1370 global_items[0] = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH);
1371 global_actions[0] = FLOW_ITEM_MASK(RTE_FLOW_ACTION_TYPE_JUMP);
/* "+ 1" leaves room for that extra group-0 rule on core 0. */
1373 flows_list = rte_zmalloc("flows_list",
1374 (sizeof(struct rte_flow *) * rules_count_per_core) + 1, 0);
1375 if (flows_list == NULL)
1376 rte_exit(EXIT_FAILURE, "No Memory available!\n");
1380 if (flow_group > 0 && core_id == 0) {
1382 * Create global rule to jump into flow_group,
1383 * this way the app will avoid the default rules.
1385 * This rule will be created only once.
1388 * group 0 eth / end actions jump group <flow_group>
1390 flow = generate_flow(port_id, 0, flow_attrs,
1391 global_items, global_actions,
1392 flow_group, 0, 0, 0, 0, dst_port_id, core_id,
1393 rx_queues_count, unique_data, max_priority, &error);
1396 print_flow_error(error);
1397 rte_exit(EXIT_FAILURE, "Error in creating flow\n");
1399 flows_list[flow_index++] = flow;
1402 start_batch = rte_get_timer_cycles();
1403 for (counter = start_counter; counter < end_counter; counter++) {
1404 flow = generate_flow(port_id, flow_group,
1405 flow_attrs, flow_items, flow_actions,
1406 JUMP_ACTION_TABLE, counter,
1407 hairpin_queues_num, encap_data,
1408 decap_data, dst_port_id,
1409 core_id, rx_queues_count,
1410 unique_data, max_priority, &error);
/* Report how long the very first rule took, in milliseconds. */
1413 first_flow_latency = (double) (rte_get_timer_cycles() - start_batch);
1414 first_flow_latency /= rte_get_timer_hz();
1415 /* In millisecond */
1416 first_flow_latency *= 1000;
1417 printf(":: First Flow Latency :: Port %d :: First flow "
1418 "installed in %f milliseconds\n",
1419 port_id, first_flow_latency);
1423 counter = end_counter;
1426 print_flow_error(error);
1427 rte_exit(EXIT_FAILURE, "Error in creating flow\n");
1430 flows_list[flow_index++] = flow;
1433 * Save the insertion rate for rules batch.
1434 * Check if the insertion reached the rules
1435 * patch counter, then save the insertion rate
1438 if (!((counter + 1) % rules_batch)) {
1439 end_batch = rte_get_timer_cycles();
1440 delta = (double) (end_batch - start_batch);
1441 rules_batch_idx = ((counter + 1) / rules_batch) - 1;
1442 cpu_time_per_batch[rules_batch_idx] = delta / rte_get_timer_hz();
1443 cpu_time_used += cpu_time_per_batch[rules_batch_idx];
1444 start_batch = rte_get_timer_cycles();
1448 /* Print insertion rates for all batches */
1449 if (dump_iterations)
1450 print_rules_batches(cpu_time_per_batch);
1452 printf(":: Port %d :: Core %d boundaries :: start @[%d] - end @[%d]\n",
1453 port_id, core_id, start_counter, end_counter - 1);
1455 /* Insertion rate for all rules in one core */
1456 insertion_rate = ((double) (rules_count_per_core / cpu_time_used) / 1000);
1457 printf(":: Port %d :: Core %d :: Rules insertion rate -> %f K Rule/Sec\n",
1458 port_id, core_id, insertion_rate);
1459 printf(":: Port %d :: Core %d :: The time for creating %d in rules %f seconds\n",
1460 port_id, core_id, rules_count_per_core, cpu_time_used);
1462 mc_pool.flows_record.insertion[port_id][core_id] = cpu_time_used;
/*
 * Per-core driver: for every port in ports_mask, optionally create
 * meters, insert flows (timing them), snapshot socket memory before and
 * after insertion, then optionally delete everything again.
 *
 * NOTE(review): damaged extraction -- "continue;" after the port-mask
 * test, the has_meter()/delete_flag conditionals and closing braces are
 * missing; confirm against upstream DPDK app/test-flow-perf/main.c.
 */
1467 flows_handler(uint8_t core_id)
1469 struct rte_flow **flows_list;
1470 uint16_t port_idx = 0;
1474 nr_ports = rte_eth_dev_count_avail();
/* Clamp batch size so the batch counters stay meaningful. */
1476 if (rules_batch > rules_count)
1477 rules_batch = rules_count;
1479 printf(":: Rules Count per port: %d\n\n", rules_count);
1481 for (port_id = 0; port_id < nr_ports; port_id++) {
1482 /* If port outside portmask */
1483 if (!((ports_mask >> port_id) & 0x1))
1486 /* Insertion part. */
1487 mc_pool.last_alloc[core_id] = (int64_t)dump_socket_mem(stdout);
1489 meters_handler(port_id, core_id, METER_CREATE);
1490 flows_list = insert_flows(port_id, core_id,
1491 dst_ports[port_idx++]);
1492 if (flows_list == NULL)
1493 rte_exit(EXIT_FAILURE, "Error: Insertion Failed!\n");
1494 mc_pool.current_alloc[core_id] = (int64_t)dump_socket_mem(stdout);
1496 /* Deletion part. */
1498 destroy_flows(port_id, core_id, flows_list);
1500 meters_handler(port_id, core_id, METER_DELETE);
/*
 * Aggregate per-core timing for one port into two views and print both:
 *  - latency: rules_count / max(per-core time)  -> limited by slowest core
 *  - throughput: rules_count / avg(per-core time)
 * Both are reported in K rules/sec for insertion and deletion.
 *
 * NOTE(review): damaged extraction -- the loop header around the
 * accumulation (likely "for (i = 1; i < mc_pool.cores_count; i++)"),
 * some printf arguments and braces are missing; confirm against
 * upstream DPDK app/test-flow-perf/main.c.
 */
1506 dump_used_cpu_time(const char *item,
1507 uint16_t port, struct used_cpu_time *used_time)
1510 /* Latency: total count of rte rules divided
1511 * over max time used by thread between all
1514 * Throughput: total count of rte rules divided
1515 * over the average of the time cosumed by all
1518 double insertion_latency_time;
1519 double insertion_throughput_time;
1520 double deletion_latency_time;
1521 double deletion_throughput_time;
1522 double insertion_latency, insertion_throughput;
1523 double deletion_latency, deletion_throughput;
1525 /* Save first insertion/deletion rates from first thread.
1526 * Start comparing with all threads, if any thread used
1527 * time more than current saved, replace it.
1529 * Thus in the end we will have the max time used for
1530 * insertion/deletion by one thread.
1532 * As for memory consumption, save the min of all threads
1533 * of last alloc, and save the max for all threads for
1537 insertion_latency_time = used_time->insertion[port][0];
1538 deletion_latency_time = used_time->deletion[port][0];
1539 insertion_throughput_time = used_time->insertion[port][0];
1540 deletion_throughput_time = used_time->deletion[port][0];
1542 i = mc_pool.cores_count;
1544 insertion_throughput_time += used_time->insertion[port][i];
1545 deletion_throughput_time += used_time->deletion[port][i];
1546 if (insertion_latency_time < used_time->insertion[port][i])
1547 insertion_latency_time = used_time->insertion[port][i];
1548 if (deletion_latency_time < used_time->deletion[port][i])
1549 deletion_latency_time = used_time->deletion[port][i];
/* Latency view: total rules over the slowest core's time, in K/sec. */
1552 insertion_latency = ((double) (mc_pool.rules_count
1553 / insertion_latency_time) / 1000);
1554 deletion_latency = ((double) (mc_pool.rules_count
1555 / deletion_latency_time) / 1000);
/* Throughput view: total rules over the mean per-core time, in K/sec. */
1557 insertion_throughput_time /= mc_pool.cores_count;
1558 deletion_throughput_time /= mc_pool.cores_count;
1559 insertion_throughput = ((double) (mc_pool.rules_count
1560 / insertion_throughput_time) / 1000);
1561 deletion_throughput = ((double) (mc_pool.rules_count
1562 / deletion_throughput_time) / 1000);
1565 printf("\n%s\n:: [Latency | Insertion] All Cores :: Port %d :: ",
1567 printf("Total flows insertion rate -> %f K Rules/Sec\n",
1569 printf(":: [Latency | Insertion] All Cores :: Port %d :: ", port);
1570 printf("The time for creating %d rules is %f seconds\n",
1571 mc_pool.rules_count, insertion_latency_time);
1573 /* Throughput stats */
1574 printf(":: [Throughput | Insertion] All Cores :: Port %d :: ", port);
1575 printf("Total flows insertion rate -> %f K Rules/Sec\n",
1576 insertion_throughput);
1577 printf(":: [Throughput | Insertion] All Cores :: Port %d :: ", port);
1578 printf("The average time for creating %d rules is %f seconds\n",
1579 mc_pool.rules_count, insertion_throughput_time);
1583 printf(":: [Latency | Deletion] All Cores :: Port %d :: Total "
1584 "deletion rate -> %f K Rules/Sec\n",
1585 port, deletion_latency);
1586 printf(":: [Latency | Deletion] All Cores :: Port %d :: ",
1588 printf("The time for deleting %d rules is %f seconds\n",
1589 mc_pool.rules_count, deletion_latency_time);
1591 /* Throughput stats */
1592 printf(":: [Throughput | Deletion] All Cores :: Port %d :: Total "
1593 "deletion rate -> %f K Rules/Sec\n",
1594 port, deletion_throughput);
1595 printf(":: [Throughput | Deletion] All Cores :: Port %d :: ",
1597 printf("The average time for deleting %d rules is %f seconds\n",
1598 mc_pool.rules_count, deletion_throughput_time);
/*
 * Estimate the DPDK-layer memory cost of one rte_flow on this port:
 * (max post-insertion alloc across cores - min pre-insertion alloc
 * across cores) / rules_count.
 *
 * NOTE(review): damaged extraction -- the loop header around the
 * min/max scan and the closing braces are missing; confirm against
 * upstream DPDK app/test-flow-perf/main.c.
 */
1603 dump_used_mem(uint16_t port)
1606 int64_t last_alloc, current_alloc;
1607 int flow_size_in_bytes;
1609 last_alloc = mc_pool.last_alloc[0];
1610 current_alloc = mc_pool.current_alloc[0];
1612 i = mc_pool.cores_count;
/* Keep the minimum pre-insertion and maximum post-insertion snapshot. */
1614 if (last_alloc > mc_pool.last_alloc[i])
1615 last_alloc = mc_pool.last_alloc[i];
1616 if (current_alloc < mc_pool.current_alloc[i])
1617 current_alloc = mc_pool.current_alloc[i];
1620 flow_size_in_bytes = (current_alloc - last_alloc) / mc_pool.rules_count;
1621 printf("\n:: Port %d :: rte_flow size in DPDK layer: %d Bytes\n",
1622 port, flow_size_in_bytes);
/*
 * Entry point launched on every lcore: map the EAL lcore id onto a dense
 * 0..cores_count-1 index, run flows_handler() for that index, then let
 * the main core wait for all workers and print the per-port summaries.
 *
 * NOTE(review): damaged extraction -- loop-body lines (break/increment),
 * "return 0;" statements, the main-lcore check and "continue;" in the
 * port loop are missing; confirm against upstream DPDK source.
 */
1626 run_rte_flow_handler_cores(void *data __rte_unused)
1629 int lcore_counter = 0;
1630 int lcore_id = rte_lcore_id();
1633 RTE_LCORE_FOREACH(i) {
1634 /* If core not needed return. */
1635 if (lcore_id == i) {
1636 printf(":: lcore %d mapped with index %d\n", lcore_id, lcore_counter);
1637 if (lcore_counter >= (int) mc_pool.cores_count)
/* From here on, lcore_id is the dense worker index, not the EAL id. */
1643 lcore_id = lcore_counter;
1645 if (lcore_id >= (int) mc_pool.cores_count)
1648 mc_pool.rules_count = rules_count;
1650 flows_handler(lcore_id);
1652 /* Only main core to print total results. */
1656 /* Make sure all cores finished insertion/deletion process. */
1657 rte_eal_mp_wait_lcore();
1659 RTE_ETH_FOREACH_DEV(port) {
1660 /* If port outside portmask */
1661 if (!((ports_mask >> port) & 0x1))
1664 dump_used_cpu_time("Meters:",
1665 port, &mc_pool.meters_record);
1666 dump_used_cpu_time("Flows:",
1667 port, &mc_pool.flows_record);
1668 dump_used_mem(port);
/*
 * SIGINT/SIGTERM handler: warn that stats will be inaccurate.
 * NOTE(review): the line setting force_quit (and printf argument line)
 * appears to be missing from this extraction; verify upstream.
 */
1675 signal_handler(int signum)
1677 if (signum == SIGINT || signum == SIGTERM) {
1678 printf("\n\nSignal %d received, preparing to exit...\n",
1680 printf("Error: Stats are wrong due to sudden signal!\n\n");
/*
 * Receive a burst of up to MAX_PKT_BURST packets into li->pkts and
 * (per the uint16_t return type) report how many were received.
 * NOTE(review): the rx_pkts accounting and "return cnt;" lines are
 * missing from this extraction; verify upstream.
 */
1685 static inline uint16_t
1686 do_rx(struct lcore_info *li, uint16_t rx_port, uint16_t rx_queue)
1689 cnt = rte_eth_rx_burst(rx_port, rx_queue, li->pkts, MAX_PKT_BURST);
/*
 * Transmit cnt packets from li->pkts; count sent packets and drops, and
 * free any mbufs the NIC did not accept so they are not leaked.
 */
1695 do_tx(struct lcore_info *li, uint16_t cnt, uint16_t tx_port,
1701 nr_tx = rte_eth_tx_burst(tx_port, tx_queue, li->pkts, cnt);
1702 li->tx_pkts += nr_tx;
1703 li->tx_drops += cnt - nr_tx;
/* Unsent tail of the burst must be freed by the application. */
1705 for (i = nr_tx; i < cnt; i++)
1706 rte_pktmbuf_free(li->pkts[i]);
1710 * Method to convert numbers into pretty numbers that easy
1711 * to read. The design here is to add comma after each three
1712 * digits and set all of this inside buffer.
1714 * For example if n = 1799321, the output will be
1715 * 1,799,321 after this method which is easier to read.
/*
 * NOTE(review): damaged extraction -- the signature line, the local
 * declarations (p, i, off), the digit-grouping loop and the return are
 * missing; only fragments remain. Verify upstream before editing.
 */
1718 pretty_number(uint64_t n, char *buf)
/* Emit each 3-digit group zero-padded, except the leading group. */
1725 sprintf(p[i], "%03d", (int)(n % 1000));
1730 sprintf(p[i++], "%d", (int)n);
1733 off += sprintf(buf + off, "%s,", p[i]);
/* Drop the trailing comma left by the join loop. */
1734 buf[strlen(buf) - 1] = '\0';
/*
 * Stats loop run on the reserved stats lcore: once per interval, print a
 * per-core tx / tx-drops / rx delta table (and a total row when more
 * than one forwarding core is active), using ESC[<n>A to redraw in
 * place. Runs until force_quit is set by the signal handler.
 *
 * NOTE(review): damaged extraction -- declarations (buf, i, nr_lines),
 * the sleep call, "continue;" lines, nr_valid_core accounting and the
 * trailing free(old) appear to be missing; verify upstream.
 */
1740 packet_per_second_stats(void)
1742 struct lcore_info *old;
1743 struct lcore_info *li, *oli;
1747 old = rte_zmalloc("old",
1748 sizeof(struct lcore_info) * RTE_MAX_LCORE, 0);
1750 rte_exit(EXIT_FAILURE, "No Memory available!\n");
/* Seed the "previous interval" snapshot with current counters. */
1752 memcpy(old, lcore_infos,
1753 sizeof(struct lcore_info) * RTE_MAX_LCORE);
1755 while (!force_quit) {
1756 uint64_t total_tx_pkts = 0;
1757 uint64_t total_rx_pkts = 0;
1758 uint64_t total_tx_drops = 0;
1759 uint64_t tx_delta, rx_delta, drops_delta;
1761 int nr_valid_core = 0;
1766 char go_up_nr_lines[16];
/* ANSI escape: move the cursor up nr_lines so the table redraws in place. */
1768 sprintf(go_up_nr_lines, "%c[%dA\r", 27, nr_lines);
1769 printf("%s\r", go_up_nr_lines);
1772 printf("\n%6s %16s %16s %16s\n", "core", "tx", "tx drops", "rx");
1773 printf("%6s %16s %16s %16s\n", "------", "----------------",
1774 "----------------", "----------------");
1776 for (i = 0; i < RTE_MAX_LCORE; i++) {
1777 li = &lcore_infos[i];
1779 if (li->mode != LCORE_MODE_PKT)
/* Per-interval deltas against the previous snapshot. */
1782 tx_delta = li->tx_pkts - oli->tx_pkts;
1783 rx_delta = li->rx_pkts - oli->rx_pkts;
1784 drops_delta = li->tx_drops - oli->tx_drops;
1785 printf("%6d %16s %16s %16s\n", i,
1786 pretty_number(tx_delta, buf[0]),
1787 pretty_number(drops_delta, buf[1]),
1788 pretty_number(rx_delta, buf[2]));
1790 total_tx_pkts += tx_delta;
1791 total_rx_pkts += rx_delta;
1792 total_tx_drops += drops_delta;
1798 if (nr_valid_core > 1) {
1799 printf("%6s %16s %16s %16s\n", "total",
1800 pretty_number(total_tx_pkts, buf[0]),
1801 pretty_number(total_tx_drops, buf[1]),
1802 pretty_number(total_rx_pkts, buf[2]));
/* Roll the snapshot forward for the next interval. */
1806 memcpy(old, lcore_infos,
1807 sizeof(struct lcore_info) * RTE_MAX_LCORE);
/*
 * Per-lcore forwarding entry point: the stats lcore runs the stats loop;
 * packet lcores loop over their assigned streams, receiving on each
 * stream's rx (port, queue) and transmitting on its tx (port, queue).
 *
 * NOTE(review): damaged extraction -- the enclosing while(!force_quit)
 * loop, the do_rx()/do_tx() call lines with the cnt variable, "continue;"
 * and returns are missing; verify upstream.
 */
1812 start_forwarding(void *data __rte_unused)
1814 int lcore = rte_lcore_id();
1817 struct lcore_info *li = &lcore_infos[lcore];
1822 if (li->mode == LCORE_MODE_STATS) {
1823 printf(":: started stats on lcore %u\n", lcore);
1824 packet_per_second_stats();
1829 for (stream_id = 0; stream_id < MAX_STREAMS; stream_id++) {
/* rx_port == -1 marks an unused stream slot (see init_lcore_info). */
1830 if (li->streams[stream_id].rx_port == -1)
1834 li->streams[stream_id].rx_port,
1835 li->streams[stream_id].rx_queue);
1838 li->streams[stream_id].tx_port,
1839 li->streams[stream_id].tx_queue);
/*
 * Build the lcore -> stream mapping used by start_forwarding():
 *  - the first lcore is reserved for stats printing;
 *  - nb_fwd_streams = ports * rx queues are distributed as evenly as
 *    possible across the remaining lcores;
 *  - each stream forwards back on the same (port, queue) it receives on.
 * Finally print the resulting mapping.
 *
 * NOTE(review): damaged extraction -- several declarations (i, j, lcore,
 * stream_id, nb_fwd_streams, port, queue), an "else" branch header,
 * "continue;" lines and braces are missing; verify upstream.
 */
1845 init_lcore_info(void)
1853 int streams_per_core;
1854 int unassigned_streams;
1856 nr_port = rte_eth_dev_count_avail();
1858 /* First logical core is reserved for stats printing */
1859 lcore = rte_get_next_lcore(-1, 0, 0);
1860 lcore_infos[lcore].mode = LCORE_MODE_STATS;
1863 * Initialize all cores
1864 * All cores at first must have -1 value in all streams
1865 * This means that this stream is not used, or not set
1868 for (i = 0; i < RTE_MAX_LCORE; i++)
1869 for (j = 0; j < MAX_STREAMS; j++) {
1870 lcore_infos[i].streams[j].tx_port = -1;
1871 lcore_infos[i].streams[j].rx_port = -1;
1872 lcore_infos[i].streams[j].tx_queue = -1;
1873 lcore_infos[i].streams[j].rx_queue = -1;
1874 lcore_infos[i].streams_nb = 0;
1878 * Calculate the total streams count.
1879 * Also distribute those streams count between the available
1880 * logical cores except first core, since it's reserved for
1883 nb_fwd_streams = nr_port * rx_queues_count;
/* More cores than streams: one stream per core (extras stay idle). */
1884 if ((int)(nb_lcores - 1) >= nb_fwd_streams)
1885 for (i = 0; i < (int)(nb_lcores - 1); i++) {
1886 lcore = rte_get_next_lcore(lcore, 0, 0);
1887 lcore_infos[lcore].streams_nb = 1;
/* Otherwise spread evenly; the remainder goes one-per-core up front. */
1890 streams_per_core = nb_fwd_streams / (nb_lcores - 1);
1891 unassigned_streams = nb_fwd_streams % (nb_lcores - 1);
1892 for (i = 0; i < (int)(nb_lcores - 1); i++) {
1893 lcore = rte_get_next_lcore(lcore, 0, 0);
1894 lcore_infos[lcore].streams_nb = streams_per_core;
1895 if (unassigned_streams) {
1896 lcore_infos[lcore].streams_nb++;
1897 unassigned_streams--;
1903 * Set the streams for the cores according to each logical
1904 * core stream count.
1905 * The streams is built on the design of what received should
1906 * forward as well, this means that if you received packets on
1907 * port 0 queue 0 then the same queue should forward the
1908 * packets, using the same logical core.
1910 lcore = rte_get_next_lcore(-1, 0, 0);
1911 for (port = 0; port < nr_port; port++) {
1912 /* Create FWD stream */
1913 for (queue = 0; queue < rx_queues_count; queue++) {
1914 if (!lcore_infos[lcore].streams_nb ||
1915 !(stream_id % lcore_infos[lcore].streams_nb)) {
1916 lcore = rte_get_next_lcore(lcore, 0, 0);
1917 lcore_infos[lcore].mode = LCORE_MODE_PKT;
/* Same (port, queue) for rx and tx: forward back where received. */
1920 lcore_infos[lcore].streams[stream_id].rx_queue = queue;
1921 lcore_infos[lcore].streams[stream_id].tx_queue = queue;
1922 lcore_infos[lcore].streams[stream_id].rx_port = port;
1923 lcore_infos[lcore].streams[stream_id].tx_port = port;
1928 /* Print all streams */
1929 printf(":: Stream -> core id[N]: (rx_port, rx_queue)->(tx_port, tx_queue)\n");
1930 for (i = 0; i < RTE_MAX_LCORE; i++)
1931 for (j = 0; j < MAX_STREAMS; j++) {
1932 /* No streams for this core */
1933 if (lcore_infos[i].streams[j].tx_port == -1)
1935 printf("Stream -> core id[%d]: (%d,%d)->(%d,%d)\n",
1937 lcore_infos[i].streams[j].rx_port,
1938 lcore_infos[i].streams[j].rx_queue,
1939 lcore_infos[i].streams[j].tx_port,
1940 lcore_infos[i].streams[j].tx_queue);
/*
 * NOTE(review): this span is the interior of the port-initialization
 * function (its signature line is missing from this extraction, along
 * with several declarations, initializer bodies, error guards,
 * "continue;" lines and braces). It: negotiates Rx metadata delivery,
 * configures each masked port with nr_queues rx/tx queues, sets up the
 * standard and (optionally) hairpin queues, enables promiscuous mode
 * and starts the port. Verify against upstream DPDK source.
 */
1949 uint16_t hairpin_queue;
1953 struct rte_eth_hairpin_conf hairpin_conf = {
1956 struct rte_eth_conf port_conf = {
1962 struct rte_eth_txconf txq_conf;
1963 struct rte_eth_rxconf rxq_conf;
1964 struct rte_eth_dev_info dev_info;
/* Hairpin queues are numbered after the standard rx queues. */
1966 nr_queues = rx_queues_count;
1967 if (hairpin_queues_num != 0)
1968 nr_queues = rx_queues_count + hairpin_queues_num;
1970 nr_ports = rte_eth_dev_count_avail();
1972 rte_exit(EXIT_FAILURE, "Error: no port detected\n");
1974 mbuf_mp = rte_pktmbuf_pool_create("mbuf_pool",
1975 total_mbuf_num, mbuf_cache_size,
1978 if (mbuf_mp == NULL)
1979 rte_exit(EXIT_FAILURE, "Error: can't init mbuf pool\n");
1981 for (port_id = 0; port_id < nr_ports; port_id++) {
1982 uint64_t rx_metadata = 0;
/* Ask the PMD to deliver FLAG and MARK metadata with Rx mbufs. */
1984 rx_metadata |= RTE_ETH_RX_METADATA_USER_FLAG;
1985 rx_metadata |= RTE_ETH_RX_METADATA_USER_MARK;
1987 ret = rte_eth_rx_metadata_negotiate(port_id, &rx_metadata);
1989 if (!(rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG)) {
1990 printf(":: flow action FLAG will not affect Rx mbufs on port=%u\n",
1994 if (!(rx_metadata & RTE_ETH_RX_METADATA_USER_MARK)) {
1995 printf(":: flow action MARK will not affect Rx mbufs on port=%u\n",
/* -ENOTSUP just means the PMD has no negotiation hook; not fatal. */
1998 } else if (ret != -ENOTSUP) {
1999 rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port=%u: %s\n",
2000 port_id, rte_strerror(-ret));
2003 ret = rte_eth_dev_info_get(port_id, &dev_info);
2005 rte_exit(EXIT_FAILURE,
2006 "Error during getting device"
2007 " (port %u) info: %s\n",
2008 port_id, strerror(-ret));
/* Keep only offloads the device actually supports. */
2010 port_conf.txmode.offloads &= dev_info.tx_offload_capa;
2011 port_conf.rxmode.offloads &= dev_info.rx_offload_capa;
2013 printf(":: initializing port: %d\n", port_id);
2015 ret = rte_eth_dev_configure(port_id, nr_queues,
2016 nr_queues, &port_conf);
2018 rte_exit(EXIT_FAILURE,
2019 ":: cannot configure device: err=%d, port=%u\n",
2022 rxq_conf = dev_info.default_rxconf;
2023 for (std_queue = 0; std_queue < rx_queues_count; std_queue++) {
2024 ret = rte_eth_rx_queue_setup(port_id, std_queue, rxd_count,
2025 rte_eth_dev_socket_id(port_id),
2029 rte_exit(EXIT_FAILURE,
2030 ":: Rx queue setup failed: err=%d, port=%u\n",
2034 txq_conf = dev_info.default_txconf;
2035 for (std_queue = 0; std_queue < tx_queues_count; std_queue++) {
2036 ret = rte_eth_tx_queue_setup(port_id, std_queue, txd_count,
2037 rte_eth_dev_socket_id(port_id),
2040 rte_exit(EXIT_FAILURE,
2041 ":: Tx queue setup failed: err=%d, port=%u\n",
2045 /* Catch all packets from traffic generator. */
2046 ret = rte_eth_promiscuous_enable(port_id);
2048 rte_exit(EXIT_FAILURE,
2049 ":: promiscuous mode enable failed: err=%s, port=%u\n",
2050 rte_strerror(-ret), port_id);
2052 if (hairpin_queues_num != 0) {
2054 * Configure peer which represents hairpin Tx.
2055 * Hairpin queue numbers start after standard queues
2056 * (rx_queues_count and tx_queues_count).
2058 for (hairpin_queue = rx_queues_count, std_queue = 0;
2059 hairpin_queue < nr_queues;
2060 hairpin_queue++, std_queue++) {
2061 hairpin_conf.peers[0].port = port_id;
2062 hairpin_conf.peers[0].queue =
2063 std_queue + tx_queues_count;
2064 ret = rte_eth_rx_hairpin_queue_setup(
2065 port_id, hairpin_queue,
2066 rxd_count, &hairpin_conf);
2068 rte_exit(EXIT_FAILURE,
2069 ":: Hairpin rx queue setup failed: err=%d, port=%u\n",
2073 for (hairpin_queue = tx_queues_count, std_queue = 0;
2074 hairpin_queue < nr_queues;
2075 hairpin_queue++, std_queue++) {
2076 hairpin_conf.peers[0].port = port_id;
2077 hairpin_conf.peers[0].queue =
2078 std_queue + rx_queues_count;
2079 ret = rte_eth_tx_hairpin_queue_setup(
2080 port_id, hairpin_queue,
2081 txd_count, &hairpin_conf);
2083 rte_exit(EXIT_FAILURE,
2084 ":: Hairpin tx queue setup failed: err=%d, port=%u\n",
2089 ret = rte_eth_dev_start(port_id);
2091 rte_exit(EXIT_FAILURE,
2092 "rte_eth_dev_start:err=%d, port=%u\n",
2095 printf(":: initializing port: %d done\n", port_id);
/*
 * Application entry point: init EAL, set defaults, parse arguments,
 * launch the flow insertion/deletion benchmark on all cores, optionally
 * start the forwarding/stats phase, then flush flows and stop/close all
 * ports on the way out.
 *
 * NOTE(review): damaged extraction -- the "int ret/port" declarations,
 * argc/argv adjustment after rte_eal_init, the has_meter()/enable_fwd
 * conditionals, init_port()/init_lcore_info() calls and the final
 * "return 0;" appear to be missing; verify upstream.
 */
2100 main(int argc, char **argv)
2104 struct rte_flow_error error;
2106 ret = rte_eal_init(argc, argv);
2108 rte_exit(EXIT_FAILURE, "EAL init failed\n");
/* Reset all run-time options to their documented defaults before
 * args_parse() overrides them. */
2111 dump_iterations = false;
2112 rules_count = DEFAULT_RULES_COUNT;
2113 rules_batch = DEFAULT_RULES_BATCH;
2114 delete_flag = false;
2115 dump_socket_mem_flag = false;
2116 flow_group = DEFAULT_GROUP;
2117 unique_data = false;
2119 rx_queues_count = (uint8_t) RXQ_NUM;
2120 tx_queues_count = (uint8_t) TXQ_NUM;
2121 rxd_count = (uint8_t) NR_RXD;
2122 txd_count = (uint8_t) NR_TXD;
2123 mbuf_size = (uint32_t) MBUF_SIZE;
2124 mbuf_cache_size = (uint32_t) MBUF_CACHE_SIZE;
2125 total_mbuf_num = (uint32_t) TOTAL_MBUF_NUM;
2127 signal(SIGINT, signal_handler);
2128 signal(SIGTERM, signal_handler);
2133 args_parse(argc, argv);
2137 nb_lcores = rte_lcore_count();
2139 rte_exit(EXIT_FAILURE, "This app needs at least two cores\n");
2141 printf(":: Flows Count per port: %d\n\n", rules_count);
2143 rte_srand(rand_seed);
2146 create_meter_profile();
2148 create_meter_policy();
2150 rte_eal_mp_remote_launch(run_rte_flow_handler_cores, NULL, CALL_MAIN);
2154 rte_eal_mp_remote_launch(start_forwarding, NULL, CALL_MAIN);
2156 if (has_meter() && delete_flag) {
2157 destroy_meter_profile();
2159 destroy_meter_policy();
/* Cleanup: drop all installed flows, then stop and close each port. */
2162 RTE_ETH_FOREACH_DEV(port) {
2163 rte_flow_flush(port, &error);
2164 if (rte_eth_dev_stop(port) != 0)
2165 printf("Failed to stop device on port %u\n", port);
2166 rte_eth_dev_close(port);
2168 printf("\nBye ...\n");