/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <inttypes.h>
#include <errno.h>
#include <ctype.h>
#include <limits.h>
#include <getopt.h>
#include <netinet/in.h>

#include <rte_eal.h>
#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_classify.h>
#include <rte_table_acl.h>
#define RX_RING_SIZE 128
#define TX_RING_SIZE 512

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32

#define MAX_NUM_CLASSIFY 30
#define FLOW_CLASSIFY_MAX_RULE_NUM 91
#define FLOW_CLASSIFY_MAX_PRIORITY 8
#define FLOW_CLASSIFIER_NAME_SIZE 64
#define COMMENT_LEAD_CHAR ('#')
#define OPTION_RULE_IPV4 "rule_ipv4"
#define RTE_LOGTYPE_FLOW_CLASSIFY RTE_LOGTYPE_USER3
#define flow_classify_log(format, ...) \
		RTE_LOG(ERR, FLOW_CLASSIFY, format, ##__VA_ARGS__)
#define uint32_t_to_char(ip, a, b, c, d) do {\
		*a = (unsigned char)(ip >> 24 & 0xff);\
		*b = (unsigned char)(ip >> 16 & 0xff);\
		*c = (unsigned char)(ip >> 8 & 0xff);\
		*d = (unsigned char)(ip & 0xff);\
	} while (0)
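
/* Illustrative use: for ip = 0xC0A80101 (192.168.1.1 in host byte order),
 * uint32_t_to_char(ip, &a, &b, &c, &d) sets a=192, b=168, c=1, d=1.
 */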
enum {
	CB_FLD_SRC_ADDR,
	CB_FLD_DST_ADDR,
	CB_FLD_SRC_PORT,
	CB_FLD_SRC_PORT_DLM,
	CB_FLD_SRC_PORT_MASK,
	CB_FLD_DST_PORT,
	CB_FLD_DST_PORT_DLM,
	CB_FLD_DST_PORT_MASK,
	CB_FLD_PROTO,
	CB_FLD_PRIORITY,
	CB_FLD_NUM,
};

static struct {
	const char *rule_ipv4_name;
} parm_config;

const char cb_port_delim[] = ":";
static const struct rte_eth_conf port_conf_default = {
	.rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
};
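
/* Default port configuration accepts standard-sized frames only:
 * ETHER_MAX_LEN is 1518 bytes.
 */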
struct flow_classifier {
	struct rte_flow_classifier *cls;
	uint32_t table_id[RTE_FLOW_CLASSIFY_TABLE_MAX];
};

struct flow_classifier_acl {
	struct flow_classifier cls;
} __rte_cache_aligned;
/* ACL field definitions for IPv4 5 tuple rule */

enum {
	PROTO_FIELD_IPV4,
	SRC_FIELD_IPV4,
	DST_FIELD_IPV4,
	SRCP_FIELD_IPV4,
	DSTP_FIELD_IPV4,
	NUM_FIELDS_IPV4
};

enum {
	PROTO_INPUT_IPV4,
	SRC_INPUT_IPV4,
	DST_INPUT_IPV4,
	SRCP_DESTP_INPUT_IPV4
};
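
/* librte_acl packs fields that share an input_index into the same 32-bit
 * input word, which is why the two 16-bit port fields below both use
 * SRCP_DESTP_INPUT_IPV4.
 */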
static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
	/* first input field - always one byte long. */
	{
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint8_t),
		.field_index = PROTO_FIELD_IPV4,
		.input_index = PROTO_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			offsetof(struct ipv4_hdr, next_proto_id),
	},
	/* next input field (IPv4 source address) - 4 consecutive bytes. */
	{
		/* rte_flow uses a bit mask for IPv4 addresses */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint32_t),
		.field_index = SRC_FIELD_IPV4,
		.input_index = SRC_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			offsetof(struct ipv4_hdr, src_addr),
	},
	/* next input field (IPv4 destination address) - 4 consecutive bytes. */
	{
		/* rte_flow uses a bit mask for IPv4 addresses */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint32_t),
		.field_index = DST_FIELD_IPV4,
		.input_index = DST_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			offsetof(struct ipv4_hdr, dst_addr),
	},
	/*
	 * Next 2 fields (src & dst ports) form 4 consecutive bytes.
	 * They share the same input index.
	 */
	{
		/* rte_flow uses a bit mask for protocol ports */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint16_t),
		.field_index = SRCP_FIELD_IPV4,
		.input_index = SRCP_DESTP_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			sizeof(struct ipv4_hdr) +
			offsetof(struct tcp_hdr, src_port),
	},
	{
		/* rte_flow uses a bit mask for protocol ports */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint16_t),
		.field_index = DSTP_FIELD_IPV4,
		.input_index = SRCP_DESTP_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			sizeof(struct ipv4_hdr) +
			offsetof(struct tcp_hdr, dst_port),
	},
};
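
/* Note: the offsets above assume an untagged Ethernet frame and a plain
 * 20-byte IPv4 header; sizeof(struct ipv4_hdr) covers only the fixed
 * header, so packets carrying IP options would not line up with these
 * field definitions.
 */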
/* flow classify data */
static int num_classify_rules;
static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
static struct rte_flow_classify_stats classify_stats = {
	.stats = (void **)&ntuple_stats
};
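
/* classify_stats.stats points at ntuple_stats, so each successful
 * rte_flow_classifier_query() call fills ntuple_stats with the matched
 * 5-tuple and its counter, which lcore_main() then prints.
 */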
/* parameters for rte_flow_classify_validate and
 * rte_flow_classify_table_entry_add functions
 */

static struct rte_flow_item eth_item = { RTE_FLOW_ITEM_TYPE_ETH,
	0, 0, 0 };
static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END,
	0, 0, 0 };

/* sample actions:
 * "actions count / end"
 */
static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT, 0};
static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
static struct rte_flow_action actions[2];
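
/* actions[] is populated per rule in add_classify_rule():
 * actions[0] = count_action, actions[1] = end_action.
 */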
/* sample attributes */
static struct rte_flow_attr attr;

/* flow_classify.c: Based on the DPDK skeleton forwarding example. */
/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = port_conf_default;
	struct ether_addr addr;
	const uint16_t rx_rings = 1, tx_rings = 1;
	int retval;
	uint16_t q;

	if (port >= rte_eth_dev_count())
		return -1;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
				rte_eth_dev_socket_id(port), NULL);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}
/*
 * The lcore main. This is the main thread that does the work: it reads
 * packets from an input port, classifies them and writes them to an
 * output port.
 */
static __attribute__((noreturn)) void
lcore_main(struct flow_classifier *cls_app)
{
	const uint8_t nb_ports = rte_eth_dev_count();
	uint8_t port;
	int ret;
	int i = 0;
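
	/* Delete the rule at index 7 up front to exercise the
	 * rte_flow_classify_table_entry_delete() API (illustrative only;
	 * it assumes the rules file installed at least 8 rules).
	 */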
	ret = rte_flow_classify_table_entry_delete(cls_app->cls,
			cls_app->table_id[0], rules[7]);
	if (ret)
		printf("table_entry_delete failed [7] %d\n\n", ret);
	else
		printf("table_entry_delete succeeded [7]\n\n");

	/*
	 * Check that the port is on the same NUMA node as the polling thread
	 * for best performance.
	 */
	for (port = 0; port < nb_ports; port++)
		if (rte_eth_dev_socket_id(port) > 0 &&
				rte_eth_dev_socket_id(port) !=
						(int)rte_socket_id()) {
			printf("\n\nWARNING: port %u is on a remote NUMA node "
					"relative to the polling thread.\n"
					"Performance will not be optimal.\n",
					port);
		}

	printf("\nCore %u forwarding packets. ", rte_lcore_id());
	printf("[Ctrl+C to quit]\n");

	/* Run until the application is quit or killed. */
	for (;;) {
		/*
		 * Receive packets on a port, classify them and forward them
		 * on the paired port.
		 * The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
		 */
		for (port = 0; port < nb_ports; port++) {
			/* Get burst of RX packets, from first port of pair. */
			struct rte_mbuf *bufs[BURST_SIZE];
			const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
					bufs, BURST_SIZE);

			if (unlikely(nb_rx == 0))
				continue;

			for (i = 0; i < MAX_NUM_CLASSIFY; i++) {
				if (rules[i] == NULL)
					continue;

				ret = rte_flow_classifier_query(
						cls_app->cls,
						cls_app->table_id[0],
						bufs, nb_rx, rules[i],
						&classify_stats);
				if (ret)
					printf("rule [%d] query failed ret [%d]\n\n",
							i, ret);
				else {
					printf("rule[%d] count=%" PRIu64 "\n",
							i, ntuple_stats.counter1);
					printf("proto = %d\n",
							ntuple_stats.ipv4_5tuple.proto);
				}
			}

			/* Send burst of TX packets, to second port of pair. */
			const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
					bufs, nb_rx);

			/* Free any unsent packets. */
			if (unlikely(nb_tx < nb_rx)) {
				uint16_t buf;

				for (buf = nb_tx; buf < nb_rx; buf++)
					rte_pktmbuf_free(bufs[buf]);
			}
		}
	}
}
/*
 * Parse the IPv4 5 tuple rules file, ipv4_rules_file.txt.
 * Expected format:
 * <src_ipv4_addr>'/'<masklen> <space> \
 * <dst_ipv4_addr>'/'<masklen> <space> \
 * <src_port> <space> ":" <src_port_mask> <space> \
 * <dst_port> <space> ":" <dst_port_mask> <space> \
 * <proto>'/'<proto_mask> <space> \
 * <priority>
 */
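
/* An illustrative rule line (hypothetical values):
 * 2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
 * i.e. UDP (proto 17/0xff) from 2.2.2.0/24 port 32 to 2.2.2.0/24 port 33,
 * priority 0.
 */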
static int
get_cb_field(char **in, uint32_t *fd, int base, unsigned long lim,
		char dlm)
{
	unsigned long val;
	char *end;

	errno = 0;
	val = strtoul(*in, &end, base);
	if (errno != 0 || end[0] != dlm || val > lim)
		return -EINVAL;

	*fd = (uint32_t)val;
	/* advance the cursor past the value and its delimiter */
	*in = end + 1;
	return 0;
}
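
/* Parse an address written as "a.b.c.d/depth" into a host-byte-order
 * address and a CIDR depth (0-32).
 */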
static int
parse_ipv4_net(char *in, uint32_t *addr, uint32_t *mask_len)
{
	uint32_t a, b, c, d, m;

	if (get_cb_field(&in, &a, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &b, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &c, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &d, 0, UINT8_MAX, '/'))
		return -EINVAL;
	if (get_cb_field(&in, &m, 0, sizeof(uint32_t) * CHAR_BIT, 0))
		return -EINVAL;

	addr[0] = IPv4(a, b, c, d);
	mask_len[0] = m;

	return 0;
}
static int
parse_ipv4_5tuple_rule(char *str, struct rte_eth_ntuple_filter *ntuple_filter)
{
	int i, ret;
	char *s, *sp, *in[CB_FLD_NUM];
	static const char *dlm = " \t\n";
	int dim = CB_FLD_NUM;
	uint32_t temp;

	s = str;
	for (i = 0; i != dim; i++, s = NULL) {
		in[i] = strtok_r(s, dlm, &sp);
		if (in[i] == NULL)
			return -EINVAL;
	}

	ret = parse_ipv4_net(in[CB_FLD_SRC_ADDR],
			&ntuple_filter->src_ip,
			&ntuple_filter->src_ip_mask);
	if (ret != 0) {
		flow_classify_log("failed to read source address/mask: %s\n",
				in[CB_FLD_SRC_ADDR]);
		return ret;
	}

	ret = parse_ipv4_net(in[CB_FLD_DST_ADDR],
			&ntuple_filter->dst_ip,
			&ntuple_filter->dst_ip_mask);
	if (ret != 0) {
		flow_classify_log("failed to read destination address/mask: %s\n",
				in[CB_FLD_DST_ADDR]);
		return ret;
	}

	if (get_cb_field(&in[CB_FLD_SRC_PORT], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->src_port = (uint16_t)temp;

	if (strncmp(in[CB_FLD_SRC_PORT_DLM], cb_port_delim,
			sizeof(cb_port_delim)) != 0)
		return -EINVAL;

	if (get_cb_field(&in[CB_FLD_SRC_PORT_MASK], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->src_port_mask = (uint16_t)temp;

	if (get_cb_field(&in[CB_FLD_DST_PORT], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->dst_port = (uint16_t)temp;

	if (strncmp(in[CB_FLD_DST_PORT_DLM], cb_port_delim,
			sizeof(cb_port_delim)) != 0)
		return -EINVAL;

	if (get_cb_field(&in[CB_FLD_DST_PORT_MASK], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->dst_port_mask = (uint16_t)temp;

	if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, '/'))
		return -EINVAL;
	ntuple_filter->proto = (uint8_t)temp;

	/* in[CB_FLD_PROTO] now points past the '/', at the proto mask */
	if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, 0))
		return -EINVAL;
	ntuple_filter->proto_mask = (uint8_t)temp;

	if (get_cb_field(&in[CB_FLD_PRIORITY], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->priority = (uint16_t)temp;
	if (ntuple_filter->priority > FLOW_CLASSIFY_MAX_PRIORITY)
		ret = -EINVAL;

	return ret;
}
/* Bypass comment and empty lines */
static inline int
is_bypass_line(char *buff)
{
	int i = 0;

	/* comment line */
	if (buff[0] == COMMENT_LEAD_CHAR)
		return 1;

	/* empty line */
	while (buff[i] != '\0') {
		if (!isspace(buff[i]))
			return 0;
		i++;
	}
	return 1;
}
/* Convert a CIDR depth (0-32) into a 32-bit network mask. */
static uint32_t
convert_depth_to_bitmask(uint32_t depth_val)
{
	uint32_t bitmask = 0;
	int i, j;

	for (i = depth_val, j = 0; i > 0; i--, j++)
		bitmask |= (1 << (31 - j));
	return bitmask;
}
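
/* Illustrative: convert_depth_to_bitmask(24) sets bits 31..8 and returns
 * 0xFFFFFF00, the usual /24 network mask.
 */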
static int
add_classify_rule(struct rte_eth_ntuple_filter *ntuple_filter,
		struct flow_classifier *cls_app)
{
	int ret = -1;
	int key_found;
	struct rte_flow_error error;
	struct rte_flow_item_ipv4 ipv4_spec;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item ipv4_udp_item;
	struct rte_flow_item ipv4_tcp_item;
	struct rte_flow_item ipv4_sctp_item;
	struct rte_flow_item_udp udp_spec;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item udp_item;
	struct rte_flow_item_tcp tcp_spec;
	struct rte_flow_item_tcp tcp_mask;
	struct rte_flow_item tcp_item;
	struct rte_flow_item_sctp sctp_spec;
	struct rte_flow_item_sctp sctp_mask;
	struct rte_flow_item sctp_item;
	struct rte_flow_item pattern_ipv4_5tuple[4];
	struct rte_flow_classify_rule *rule;
	uint8_t ipv4_proto;

	if (num_classify_rules >= MAX_NUM_CLASSIFY) {
		printf("\nINFO: classify rule capacity %d reached\n",
				num_classify_rules);
		return ret;
	}

	/* set up parameters for validate and add */
	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	ipv4_spec.hdr.next_proto_id = ntuple_filter->proto;
	ipv4_spec.hdr.src_addr = ntuple_filter->src_ip;
	ipv4_spec.hdr.dst_addr = ntuple_filter->dst_ip;
	ipv4_proto = ipv4_spec.hdr.next_proto_id;

	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4_mask.hdr.next_proto_id = ntuple_filter->proto_mask;
	/* The rules file carries CIDR depths; the ACL table wants bitmasks. */
	ipv4_mask.hdr.src_addr =
			convert_depth_to_bitmask(ntuple_filter->src_ip_mask);
	ipv4_mask.hdr.dst_addr =
			convert_depth_to_bitmask(ntuple_filter->dst_ip_mask);
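
	/* Build the 4-item match pattern eth / ipv4 / <L4> / end below;
	 * the L4 item (UDP, TCP or SCTP) is chosen by the rule's protocol.
	 */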
	switch (ipv4_proto) {
	case IPPROTO_UDP:
		ipv4_udp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_udp_item.spec = &ipv4_spec;
		ipv4_udp_item.mask = &ipv4_mask;
		ipv4_udp_item.last = NULL;

		udp_spec.hdr.src_port = ntuple_filter->src_port;
		udp_spec.hdr.dst_port = ntuple_filter->dst_port;
		udp_spec.hdr.dgram_len = 0;
		udp_spec.hdr.dgram_cksum = 0;

		udp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		udp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
		udp_mask.hdr.dgram_len = 0;
		udp_mask.hdr.dgram_cksum = 0;

		udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
		udp_item.spec = &udp_spec;
		udp_item.mask = &udp_mask;
		udp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_udp_item;
		pattern_ipv4_5tuple[2] = udp_item;
		break;
	case IPPROTO_TCP:
		ipv4_tcp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_tcp_item.spec = &ipv4_spec;
		ipv4_tcp_item.mask = &ipv4_mask;
		ipv4_tcp_item.last = NULL;

		memset(&tcp_spec, 0, sizeof(tcp_spec));
		tcp_spec.hdr.src_port = ntuple_filter->src_port;
		tcp_spec.hdr.dst_port = ntuple_filter->dst_port;

		memset(&tcp_mask, 0, sizeof(tcp_mask));
		tcp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		tcp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;

		tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
		tcp_item.spec = &tcp_spec;
		tcp_item.mask = &tcp_mask;
		tcp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_tcp_item;
		pattern_ipv4_5tuple[2] = tcp_item;
		break;
	case IPPROTO_SCTP:
		ipv4_sctp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_sctp_item.spec = &ipv4_spec;
		ipv4_sctp_item.mask = &ipv4_mask;
		ipv4_sctp_item.last = NULL;

		sctp_spec.hdr.src_port = ntuple_filter->src_port;
		sctp_spec.hdr.dst_port = ntuple_filter->dst_port;
		sctp_spec.hdr.cksum = 0;
		sctp_spec.hdr.tag = 0;

		sctp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		sctp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
		sctp_mask.hdr.cksum = 0;
		sctp_mask.hdr.tag = 0;

		sctp_item.type = RTE_FLOW_ITEM_TYPE_SCTP;
		sctp_item.spec = &sctp_spec;
		sctp_item.mask = &sctp_mask;
		sctp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_sctp_item;
		pattern_ipv4_5tuple[2] = sctp_item;
		break;
	default:
		/* only UDP, TCP and SCTP rules are supported */
		return ret;
	}

	attr.ingress = 1;
	pattern_ipv4_5tuple[0] = eth_item;
	pattern_ipv4_5tuple[3] = end_item;
	actions[0] = count_action;
	actions[1] = end_action;

	rule = rte_flow_classify_table_entry_add(
			cls_app->cls, cls_app->table_id[0], &key_found,
			&attr, pattern_ipv4_5tuple, actions, &error);
	if (rule == NULL) {
		printf("table entry add failed ipv4_proto = %u\n",
				ipv4_proto);
		return ret;
	}

	rules[num_classify_rules] = rule;
	num_classify_rules++;
	return 0;
}
static int
add_rules(const char *rule_path, struct flow_classifier *cls_app)
{
	FILE *fh;
	char buff[LINE_MAX];
	unsigned int i = 0;
	unsigned int total_num = 0;
	struct rte_eth_ntuple_filter ntuple_filter;
	int ret;

	fh = fopen(rule_path, "rb");
	if (fh == NULL)
		rte_exit(EXIT_FAILURE, "%s: fopen %s failed\n", __func__,
				rule_path);

	ret = fseek(fh, 0, SEEK_SET);
	if (ret)
		rte_exit(EXIT_FAILURE, "%s: fseek %d failed\n", __func__,
				ret);

	while (fgets(buff, LINE_MAX, fh) != NULL) {
		i++;

		if (is_bypass_line(buff))
			continue;

		if (total_num >= FLOW_CLASSIFY_MAX_RULE_NUM - 1) {
			printf("\nINFO: classify rule capacity %d reached\n",
					total_num);
			break;
		}

		if (parse_ipv4_5tuple_rule(buff, &ntuple_filter) != 0)
			rte_exit(EXIT_FAILURE,
					"%s Line %u: parse rules error\n",
					rule_path, i);

		if (add_classify_rule(&ntuple_filter, cls_app) != 0)
			rte_exit(EXIT_FAILURE, "add rule error\n");

		total_num++;
	}

	fclose(fh);
	return 0;
}
static void
print_usage(const char *prgname)
{
	printf("%s usage:\n", prgname);
	printf("[EAL options] -- --"OPTION_RULE_IPV4"=FILE: ");
	printf("specify the ipv4 rules file.\n");
	printf("Each rule occupies one line in the file.\n");
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{OPTION_RULE_IPV4, 1, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "",
			lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name,
					OPTION_RULE_IPV4,
					sizeof(OPTION_RULE_IPV4)))
				parm_config.rule_ipv4_name = optarg;
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;

	ret = optind-1;
	optind = 1; /* reset getopt lib */
	return ret;
}
/*
 * The main function, which does initialization and calls the lcore_main
 * function.
 */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	uint8_t nb_ports;
	uint8_t portid;
	int ret;
	int socket_id;
	struct rte_table_acl_params table_acl_params;
	struct rte_flow_classify_table_params cls_table_params;
	struct flow_classifier *cls_app;
	struct rte_flow_classifier_params cls_params;
	uint32_t size;

	/* Initialize the Environment Abstraction Layer (EAL). */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	argc -= ret;
	argv += ret;

	/* Parse application arguments (after the EAL ones). */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");

	/* Check that there is an even number of ports to send/receive on. */
	nb_ports = rte_eth_dev_count();
	if (nb_ports < 2 || (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");

	/* Create a new mempool in memory to hold the mbufs. */
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
			MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* Initialize all ports. */
	for (portid = 0; portid < nb_ports; portid++)
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
					portid);

	if (rte_lcore_count() > 1)
		printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");

	socket_id = rte_eth_dev_socket_id(0);

	/* Memory allocation */
	size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
	cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (cls_app == NULL)
		rte_exit(EXIT_FAILURE, "Cannot allocate classifier memory\n");

	cls_params.name = "flow_classifier";
	cls_params.socket_id = socket_id;
	cls_params.type = RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL;

	cls_app->cls = rte_flow_classifier_create(&cls_params);
	if (cls_app->cls == NULL) {
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Cannot create classifier\n");
	}

	/* initialise ACL table params */
	table_acl_params.name = "table_acl_ipv4_5tuple";
	table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
	table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
	memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
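
	/* field_format is a fixed-size array inside rte_table_acl_params;
	 * the memcpy above copies all RTE_DIM(ipv4_defs) field definitions
	 * and n_rule_fields records how many of them are in use.
	 */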
	/* initialise table create params */
	cls_table_params.ops = &rte_table_acl_ops;
	cls_table_params.arg_create = &table_acl_params;

	ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params,
			&cls_app->table_id[0]);
	if (ret) {
		rte_flow_classifier_free(cls_app->cls);
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
	}

	/* read file of IPv4 5 tuple rules and initialize parameters
	 * for the rte_flow_classify_validate and
	 * rte_flow_classify_table_entry_add APIs
	 */
	if (add_rules(parm_config.rule_ipv4_name, cls_app)) {
		rte_flow_classifier_free(cls_app->cls);
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Failed to add rules\n");
	}

	/* Call lcore_main on the master core only. */
	lcore_main(cls_app);

	return 0;
}