/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <getopt.h>
#include <netinet/in.h>

#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_classify.h>
#include <rte_table_acl.h>
#define RX_RING_SIZE 128
#define TX_RING_SIZE 512

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32
#define MAX_NUM_CLASSIFY 30
#define FLOW_CLASSIFY_MAX_RULE_NUM 91
#define FLOW_CLASSIFY_MAX_PRIORITY 8
#define FLOW_CLASSIFIER_NAME_SIZE 64

#define COMMENT_LEAD_CHAR ('#')
#define OPTION_RULE_IPV4 "rule_ipv4"
#define RTE_LOGTYPE_FLOW_CLASSIFY RTE_LOGTYPE_USER3
#define flow_classify_log(format, ...) \
		RTE_LOG(ERR, FLOW_CLASSIFY, format, ##__VA_ARGS__)

#define uint32_t_to_char(ip, a, b, c, d) do {\
		*a = (unsigned char)(ip >> 24 & 0xff);\
		*b = (unsigned char)(ip >> 16 & 0xff);\
		*c = (unsigned char)(ip >> 8 & 0xff);\
		*d = (unsigned char)(ip & 0xff);\
	} while (0)
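
/*
 * Illustrative use of the macro above (not part of the upstream example):
 * unpack an IPv4 address into its dotted-quad bytes for printing. Here
 * ip_be is a placeholder for an address in network byte order, converted
 * to host order with rte_bswap32() before unpacking:
 *
 *	unsigned char a, b, c, d;
 *	uint32_t_to_char(rte_bswap32(ip_be), &a, &b, &c, &d);
 *	printf("%hhu.%hhu.%hhu.%hhu\n", a, b, c, d);
 */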

enum {
	CB_FLD_SRC_ADDR,
	CB_FLD_DST_ADDR,
	CB_FLD_SRC_PORT,
	CB_FLD_SRC_PORT_DLM,
	CB_FLD_SRC_PORT_MASK,
	CB_FLD_DST_PORT,
	CB_FLD_DST_PORT_DLM,
	CB_FLD_DST_PORT_MASK,
	CB_FLD_PROTO,
	CB_FLD_PRIORITY,
	CB_FLD_NUM,
};

static struct {
	const char *rule_ipv4_name;
} parm_config;

const char cb_port_delim[] = ":";

static const struct rte_eth_conf port_conf_default = {
	.rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
};

struct flow_classifier {
	struct rte_flow_classifier *cls;
};

struct flow_classifier_acl {
	struct flow_classifier cls;
} __rte_cache_aligned;
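
/*
 * Note: __rte_cache_aligned rounds the wrapper up to a cache-line boundary;
 * together with the RTE_CACHE_LINE_ROUNDUP()/rte_zmalloc() pairing in main()
 * this keeps the classifier state from sharing a cache line with unrelated
 * allocations.
 */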

/* ACL field definitions for IPv4 5 tuple rule */

enum {
	PROTO_FIELD_IPV4,
	SRC_FIELD_IPV4,
	DST_FIELD_IPV4,
	SRCP_FIELD_IPV4,
	DSTP_FIELD_IPV4,
	NUM_FIELDS_IPV4
};

enum {
	PROTO_INPUT_IPV4,
	SRC_INPUT_IPV4,
	DST_INPUT_IPV4,
	SRCP_DESTP_INPUT_IPV4
};

static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
	/* first input field - always one byte long. */
	{
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint8_t),
		.field_index = PROTO_FIELD_IPV4,
		.input_index = PROTO_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			offsetof(struct ipv4_hdr, next_proto_id),
	},
	/* next input field (IPv4 source address) - 4 consecutive bytes. */
	{
		/* rte_flow uses a bit mask for IPv4 addresses */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint32_t),
		.field_index = SRC_FIELD_IPV4,
		.input_index = SRC_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			offsetof(struct ipv4_hdr, src_addr),
	},
	/* next input field (IPv4 destination address) - 4 consecutive bytes. */
	{
		/* rte_flow uses a bit mask for IPv4 addresses */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint32_t),
		.field_index = DST_FIELD_IPV4,
		.input_index = DST_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			offsetof(struct ipv4_hdr, dst_addr),
	},
	/*
	 * Next 2 fields (src & dst ports) form 4 consecutive bytes.
	 * They share the same input index.
	 */
	{
		/* rte_flow uses a bit mask for protocol ports */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint16_t),
		.field_index = SRCP_FIELD_IPV4,
		.input_index = SRCP_DESTP_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			sizeof(struct ipv4_hdr) +
			offsetof(struct tcp_hdr, src_port),
	},
	{
		/* rte_flow uses a bit mask for protocol ports */
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint16_t),
		.field_index = DSTP_FIELD_IPV4,
		.input_index = SRCP_DESTP_INPUT_IPV4,
		.offset = sizeof(struct ether_hdr) +
			sizeof(struct ipv4_hdr) +
			offsetof(struct tcp_hdr, dst_port),
	},
};
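
/*
 * Note: librte_acl consumes match data in 4-byte chunks, one per input_index.
 * Each 32-bit address field above fills a chunk of its own, while the two
 * 16-bit port fields share SRCP_DESTP_INPUT_IPV4 because together they form
 * the 4 consecutive bytes described in the comment above.
 */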

/* flow classify data */
static int num_classify_rules;
static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
static struct rte_flow_classify_stats classify_stats = {
		.stats = (void **)&ntuple_stats
};
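
/*
 * classify_stats wraps ntuple_stats, so every rte_flow_classifier_query()
 * call that matches fills ntuple_stats with the rule's packet count and
 * 5-tuple; lcore_main() below reads the results from there.
 */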

/* parameters for rte_flow_classify_validate and
 * rte_flow_classify_table_entry_add functions
 */

static struct rte_flow_item eth_item = { RTE_FLOW_ITEM_TYPE_ETH,
	0, 0, 0 };
static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END,
	0, 0, 0 };

/* sample actions:
 * "actions count / end"
 */
struct rte_flow_query_count count = {
	.reset = 1,
	.hits_set = 1,
	.bytes_set = 1,
	.hits = 0,
	.bytes = 0,
};
static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT,
	&count};
static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
static struct rte_flow_action actions[2];

/* sample attributes */
static struct rte_flow_attr attr;

/* flow_classify.c: Based on DPDK skeleton forwarding example. */

/*
 * Initializes a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = port_conf_default;
	struct ether_addr addr;
	const uint16_t rx_rings = 1, tx_rings = 1;
	int retval;
	uint16_t q;

	if (port >= rte_eth_dev_count())
		return -1;

	/* Configure the Ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0)
		return retval;

	/* Allocate and set up 1 RX queue per Ethernet port. */
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
				rte_eth_dev_socket_id(port), NULL, mbuf_pool);
		if (retval < 0)
			return retval;
	}

	/* Allocate and set up 1 TX queue per Ethernet port. */
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
				rte_eth_dev_socket_id(port), NULL);
		if (retval < 0)
			return retval;
	}

	/* Start the Ethernet port. */
	retval = rte_eth_dev_start(port);
	if (retval < 0)
		return retval;

	/* Display the port MAC address. */
	rte_eth_macaddr_get(port, &addr);
	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
			port,
			addr.addr_bytes[0], addr.addr_bytes[1],
			addr.addr_bytes[2], addr.addr_bytes[3],
			addr.addr_bytes[4], addr.addr_bytes[5]);

	/* Enable RX in promiscuous mode for the Ethernet device. */
	rte_eth_promiscuous_enable(port);

	return 0;
}

/*
 * The lcore main. This is the main thread that does the work, reading from
 * an input port, classifying the packets and writing to an output port.
 */
static __attribute__((noreturn)) void
lcore_main(struct flow_classifier *cls_app)
{
	const uint8_t nb_ports = rte_eth_dev_count();
	uint8_t port;
	int ret;
	int i = 0;

	/* Delete rule [7] to demonstrate table entry removal. */
	ret = rte_flow_classify_table_entry_delete(cls_app->cls,
			rules[7]);
	if (ret)
		printf("table_entry_delete failed [7] %d\n\n", ret);
	else
		printf("table_entry_delete succeeded [7]\n\n");

	/*
	 * Check that the port is on the same NUMA node as the polling thread
	 * for best performance.
	 */
	for (port = 0; port < nb_ports; port++)
		if (rte_eth_dev_socket_id(port) > 0 &&
				rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
			printf("WARNING: port %u is on a remote NUMA node "
					"relative to the polling thread.\n", port);
			printf("Performance will not be optimal.\n");
		}

	printf("\nCore %u forwarding packets. ", rte_lcore_id());
	printf("[Ctrl+C to quit]\n");

	/* Run until the application is quit or killed. */
	for (;;) {
		/*
		 * Receive packets on a port, classify them and forward them
		 * on the paired port.
		 * The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
		 */
		for (port = 0; port < nb_ports; port++) {
			/* Get burst of RX packets, from first port of pair. */
			struct rte_mbuf *bufs[BURST_SIZE];
			const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
					bufs, BURST_SIZE);

			if (unlikely(nb_rx == 0))
				continue;

			for (i = 0; i < MAX_NUM_CLASSIFY; i++) {
				if (rules[i]) {
					ret = rte_flow_classifier_query(
						cls_app->cls,
						bufs, nb_rx, rules[i],
						&classify_stats);
					if (ret)
						printf(
						"rule [%d] query failed ret [%d]\n\n",
						i, ret);
					else {
						printf(
						"rule[%d] count=%"PRIu64"\n",
						i, ntuple_stats.counter1);

						printf("proto = %d\n",
						ntuple_stats.ipv4_5tuple.proto);
					}
				}
			}

			/* Send burst of TX packets, to second port of pair. */
			const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
					bufs, nb_rx);

			/* Free any unsent packets. */
			if (unlikely(nb_tx < nb_rx)) {
				uint16_t buf;

				for (buf = nb_tx; buf < nb_rx; buf++)
					rte_pktmbuf_free(bufs[buf]);
			}
		}
	}
}

/*
 * Parse IPv4 5 tuple rules file, ipv4_rules_file.txt.
 * Expected format:
 * <src_ipv4_addr>'/'<masklen> <space> \
 * <dst_ipv4_addr>'/'<masklen> <space> \
 * <src_port> <space> ":" <src_port_mask> <space> \
 * <dst_port> <space> ":" <dst_port_mask> <space> \
 * <proto>'/'<proto_mask> <space> \
 * <priority>
 */
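
/*
 * For illustration, a rule matching UDP (protocol 17) between two /24
 * networks could look like the line below (the addresses, ports and
 * priority are hypothetical, not taken from a bundled rules file):
 *
 * 2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
 */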

static int
get_cb_field(char **in, uint32_t *fd, int base, unsigned long lim,
		char dlm)
{
	unsigned long val;
	char *end;

	errno = 0;
	val = strtoul(*in, &end, base);
	if (errno != 0 || end[0] != dlm || val > lim)
		return -EINVAL;
	*fd = (uint32_t)val;

	*in = end + 1;
	return 0;
}

static int
parse_ipv4_net(char *in, uint32_t *addr, uint32_t *mask_len)
{
	uint32_t a, b, c, d, m;

	if (get_cb_field(&in, &a, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &b, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &c, 0, UINT8_MAX, '.'))
		return -EINVAL;
	if (get_cb_field(&in, &d, 0, UINT8_MAX, '/'))
		return -EINVAL;
	if (get_cb_field(&in, &m, 0, sizeof(uint32_t) * CHAR_BIT, 0))
		return -EINVAL;

	addr[0] = IPv4(a, b, c, d);
	mask_len[0] = m;
	return 0;
}
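
/*
 * Worked example: parse_ipv4_net("192.168.0.1/24", &addr, &mask_len) steps
 * through the dotted quad with get_cb_field() and yields
 * addr = IPv4(192, 168, 0, 1) = 0xc0a80001 and mask_len = 24.
 */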

static int
parse_ipv4_5tuple_rule(char *str, struct rte_eth_ntuple_filter *ntuple_filter)
{
	int i, ret;
	char *s, *sp, *in[CB_FLD_NUM];
	static const char *dlm = " \t\n";
	int dim = CB_FLD_NUM;
	uint32_t temp;

	s = str;
	for (i = 0; i != dim; i++, s = NULL) {
		in[i] = strtok_r(s, dlm, &sp);
		if (in[i] == NULL)
			return -EINVAL;
	}

	ret = parse_ipv4_net(in[CB_FLD_SRC_ADDR],
			&ntuple_filter->src_ip,
			&ntuple_filter->src_ip_mask);
	if (ret != 0) {
		flow_classify_log("failed to read source address/mask: %s\n",
			in[CB_FLD_SRC_ADDR]);
		return ret;
	}

	ret = parse_ipv4_net(in[CB_FLD_DST_ADDR],
			&ntuple_filter->dst_ip,
			&ntuple_filter->dst_ip_mask);
	if (ret != 0) {
		flow_classify_log("failed to read destination address/mask: %s\n",
			in[CB_FLD_DST_ADDR]);
		return ret;
	}

	if (get_cb_field(&in[CB_FLD_SRC_PORT], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->src_port = (uint16_t)temp;

	if (strncmp(in[CB_FLD_SRC_PORT_DLM], cb_port_delim,
			sizeof(cb_port_delim)) != 0)
		return -EINVAL;

	if (get_cb_field(&in[CB_FLD_SRC_PORT_MASK], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->src_port_mask = (uint16_t)temp;

	if (get_cb_field(&in[CB_FLD_DST_PORT], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->dst_port = (uint16_t)temp;

	if (strncmp(in[CB_FLD_DST_PORT_DLM], cb_port_delim,
			sizeof(cb_port_delim)) != 0)
		return -EINVAL;

	if (get_cb_field(&in[CB_FLD_DST_PORT_MASK], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->dst_port_mask = (uint16_t)temp;

	/* proto is <proto>'/'<proto_mask>; the first call stops at '/' and
	 * advances the cursor, so the same field is parsed twice.
	 */
	if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, '/'))
		return -EINVAL;
	ntuple_filter->proto = (uint8_t)temp;

	if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, 0))
		return -EINVAL;
	ntuple_filter->proto_mask = (uint8_t)temp;

	if (get_cb_field(&in[CB_FLD_PRIORITY], &temp, 0, UINT16_MAX, 0))
		return -EINVAL;
	ntuple_filter->priority = (uint16_t)temp;
	if (ntuple_filter->priority > FLOW_CLASSIFY_MAX_PRIORITY)
		ret = -EINVAL;

	return ret;
}

/* Bypass comment and empty lines */
static int
is_bypass_line(char *buff)
{
	int i = 0;

	/* comment line */
	if (buff[0] == COMMENT_LEAD_CHAR)
		return 1;

	/* empty line */
	while (buff[i] != '\0') {
		if (!isspace(buff[i]))
			return 0;
		i++;
	}
	return 1;
}

static uint32_t
convert_depth_to_bitmask(uint32_t depth_val)
{
	uint32_t bitmask = 0;
	int i, j;

	for (i = depth_val, j = 0; i > 0; i--, j++)
		bitmask |= (1 << (31 - j));
	return bitmask;
}
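
/*
 * Worked example: a /24 prefix depth sets the 24 most significant bits,
 * so convert_depth_to_bitmask(24) returns 0xffffff00; a depth of 0 returns
 * 0 (match-all for that field).
 */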

static int
add_classify_rule(struct rte_eth_ntuple_filter *ntuple_filter,
		struct flow_classifier *cls_app)
{
	int ret = -1;
	int key_found;
	struct rte_flow_error error;
	struct rte_flow_item_ipv4 ipv4_spec;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item ipv4_udp_item;
	struct rte_flow_item ipv4_tcp_item;
	struct rte_flow_item ipv4_sctp_item;
	struct rte_flow_item_udp udp_spec;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item udp_item;
	struct rte_flow_item_tcp tcp_spec;
	struct rte_flow_item_tcp tcp_mask;
	struct rte_flow_item tcp_item;
	struct rte_flow_item_sctp sctp_spec;
	struct rte_flow_item_sctp sctp_mask;
	struct rte_flow_item sctp_item;
	struct rte_flow_item pattern_ipv4_5tuple[4];
	struct rte_flow_classify_rule *rule;
	uint8_t ipv4_proto;

	if (num_classify_rules >= MAX_NUM_CLASSIFY) {
		printf("\nINFO: classify rule capacity %d reached\n",
			num_classify_rules);
		return ret;
	}

	/* set up parameters for validate and add */
	memset(&ipv4_spec, 0, sizeof(ipv4_spec));
	ipv4_spec.hdr.next_proto_id = ntuple_filter->proto;
	ipv4_spec.hdr.src_addr = ntuple_filter->src_ip;
	ipv4_spec.hdr.dst_addr = ntuple_filter->dst_ip;
	ipv4_proto = ipv4_spec.hdr.next_proto_id;

	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	ipv4_mask.hdr.next_proto_id = ntuple_filter->proto_mask;
	ipv4_mask.hdr.src_addr = ntuple_filter->src_ip_mask;
	ipv4_mask.hdr.src_addr =
		convert_depth_to_bitmask(ipv4_mask.hdr.src_addr);
	ipv4_mask.hdr.dst_addr = ntuple_filter->dst_ip_mask;
	ipv4_mask.hdr.dst_addr =
		convert_depth_to_bitmask(ipv4_mask.hdr.dst_addr);

	switch (ipv4_proto) {
	case IPPROTO_UDP:
		ipv4_udp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_udp_item.spec = &ipv4_spec;
		ipv4_udp_item.mask = &ipv4_mask;
		ipv4_udp_item.last = NULL;

		udp_spec.hdr.src_port = ntuple_filter->src_port;
		udp_spec.hdr.dst_port = ntuple_filter->dst_port;
		udp_spec.hdr.dgram_len = 0;
		udp_spec.hdr.dgram_cksum = 0;

		udp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		udp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
		udp_mask.hdr.dgram_len = 0;
		udp_mask.hdr.dgram_cksum = 0;

		udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
		udp_item.spec = &udp_spec;
		udp_item.mask = &udp_mask;
		udp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_udp_item;
		pattern_ipv4_5tuple[2] = udp_item;
		break;
	case IPPROTO_TCP:
		ipv4_tcp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_tcp_item.spec = &ipv4_spec;
		ipv4_tcp_item.mask = &ipv4_mask;
		ipv4_tcp_item.last = NULL;

		memset(&tcp_spec, 0, sizeof(tcp_spec));
		tcp_spec.hdr.src_port = ntuple_filter->src_port;
		tcp_spec.hdr.dst_port = ntuple_filter->dst_port;

		memset(&tcp_mask, 0, sizeof(tcp_mask));
		tcp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		tcp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;

		tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
		tcp_item.spec = &tcp_spec;
		tcp_item.mask = &tcp_mask;
		tcp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_tcp_item;
		pattern_ipv4_5tuple[2] = tcp_item;
		break;
	case IPPROTO_SCTP:
		ipv4_sctp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		ipv4_sctp_item.spec = &ipv4_spec;
		ipv4_sctp_item.mask = &ipv4_mask;
		ipv4_sctp_item.last = NULL;

		sctp_spec.hdr.src_port = ntuple_filter->src_port;
		sctp_spec.hdr.dst_port = ntuple_filter->dst_port;
		sctp_spec.hdr.cksum = 0;
		sctp_spec.hdr.tag = 0;

		sctp_mask.hdr.src_port = ntuple_filter->src_port_mask;
		sctp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
		sctp_mask.hdr.cksum = 0;
		sctp_mask.hdr.tag = 0;

		sctp_item.type = RTE_FLOW_ITEM_TYPE_SCTP;
		sctp_item.spec = &sctp_spec;
		sctp_item.mask = &sctp_mask;
		sctp_item.last = NULL;

		attr.priority = ntuple_filter->priority;
		pattern_ipv4_5tuple[1] = ipv4_sctp_item;
		pattern_ipv4_5tuple[2] = sctp_item;
		break;
	default:
		return ret;
	}

	attr.ingress = 1;
	pattern_ipv4_5tuple[0] = eth_item;
	pattern_ipv4_5tuple[3] = end_item;
	actions[0] = count_action;
	actions[1] = end_action;

	/* Validate and add rule */
	ret = rte_flow_classify_validate(cls_app->cls, &attr,
			pattern_ipv4_5tuple, actions, &error);
	if (ret) {
		printf("table entry validate failed ipv4_proto = %u\n",
			ipv4_proto);
		return ret;
	}

	rule = rte_flow_classify_table_entry_add(
			cls_app->cls, &attr, pattern_ipv4_5tuple,
			actions, &key_found, &error);
	if (rule == NULL) {
		printf("table entry add failed ipv4_proto = %u\n",
			ipv4_proto);
		ret = -1;
		return ret;
	}

	rules[num_classify_rules] = rule;
	num_classify_rules++;
	return 0;
}
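
/*
 * Note: key_found is an output of rte_flow_classify_table_entry_add(); it is
 * set when an entry with the same key already exists in the table. This
 * example passes the pointer but does not act on the result.
 */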

static int
add_rules(const char *rule_path, struct flow_classifier *cls_app)
{
	FILE *fh;
	char buff[LINE_MAX];
	unsigned int i = 0;
	unsigned int total_num = 0;
	struct rte_eth_ntuple_filter ntuple_filter;
	int ret;

	fh = fopen(rule_path, "rb");
	if (fh == NULL)
		rte_exit(EXIT_FAILURE, "%s: fopen %s failed\n", __func__,
			rule_path);

	ret = fseek(fh, 0, SEEK_SET);
	if (ret)
		rte_exit(EXIT_FAILURE, "%s: fseek %d failed\n", __func__,
			ret);

	while (fgets(buff, LINE_MAX, fh) != NULL) {
		i++;

		if (is_bypass_line(buff))
			continue;

		if (total_num >= FLOW_CLASSIFY_MAX_RULE_NUM - 1) {
			printf("\nINFO: classify rule capacity %d reached\n",
				total_num);
			break;
		}

		if (parse_ipv4_5tuple_rule(buff, &ntuple_filter) != 0)
			rte_exit(EXIT_FAILURE,
				"%s Line %u: parse rules error\n",
				rule_path, i);

		if (add_classify_rule(&ntuple_filter, cls_app) != 0)
			rte_exit(EXIT_FAILURE, "add rule error\n");

		total_num++;
	}

	fclose(fh);
	return 0;
}

static void
print_usage(const char *prgname)
{
	printf("%s usage:\n", prgname);
	printf("[EAL options] -- --"OPTION_RULE_IPV4"=FILE: ");
	printf("specify the ipv4 rules file.\n");
	printf("Each rule occupies one line in the file.\n");
}

/* Parse the arguments given on the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{OPTION_RULE_IPV4, 1, 0, 0},
		{NULL, 0, 0, 0}
	};

	argvopt = argv;

	while ((opt = getopt_long(argc, argvopt, "",
			lgopts, &option_index)) != EOF) {
		switch (opt) {
		/* long options */
		case 0:
			if (!strncmp(lgopts[option_index].name,
					OPTION_RULE_IPV4,
					sizeof(OPTION_RULE_IPV4)))
				parm_config.rule_ipv4_name = optarg;
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind-1] = prgname;
	optind = 1; /* reset getopt lib */
	return 0;
}

/*
 * The main function, which does initialization and calls the lcore_main
 * function.
 */
int
main(int argc, char *argv[])
{
	struct rte_mempool *mbuf_pool;
	uint8_t nb_ports;
	uint8_t portid;
	int ret;
	int socket_id;
	uint32_t size;
	struct rte_table_acl_params table_acl_params;
	struct rte_flow_classify_table_params cls_table_params;
	struct flow_classifier *cls_app;
	struct rte_flow_classifier_params cls_params;

	/* Initialize the Environment Abstraction Layer (EAL). */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");

	/* Check that there is an even number of ports to send/receive on. */
	nb_ports = rte_eth_dev_count();
	if (nb_ports < 2 || (nb_ports & 1))
		rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");

	/* Creates a new mempool in memory to hold the mbufs. */
	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
		MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* Initialize all ports. */
	for (portid = 0; portid < nb_ports; portid++)
		if (port_init(portid, mbuf_pool) != 0)
			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
					portid);

	if (rte_lcore_count() > 1)
		printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");

	socket_id = rte_eth_dev_socket_id(0);

	/* Memory allocation */
	size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
	cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
	if (cls_app == NULL)
		rte_exit(EXIT_FAILURE, "Cannot allocate classifier memory\n");

	cls_params.name = "flow_classifier";
	cls_params.socket_id = socket_id;

	cls_app->cls = rte_flow_classifier_create(&cls_params);
	if (cls_app->cls == NULL) {
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Cannot create classifier\n");
	}

	/* initialise ACL table params */
	table_acl_params.name = "table_acl_ipv4_5tuple";
	table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
	table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
	memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));

	/* initialise table create params */
	cls_table_params.ops = &rte_table_acl_ops;
	cls_table_params.arg_create = &table_acl_params;
	cls_table_params.type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;

	ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params);
	if (ret) {
		rte_flow_classifier_free(cls_app->cls);
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
	}

	/* read file of IPv4 5 tuple rules and initialize parameters
	 * for the rte_flow_classify_validate and
	 * rte_flow_classify_table_entry_add APIs
	 */
	if (add_rules(parm_config.rule_ipv4_name, cls_app)) {
		rte_flow_classifier_free(cls_app->cls);
		rte_free(cls_app);
		rte_exit(EXIT_FAILURE, "Failed to add rules\n");
	}

	/* Call lcore_main on the master core only. */
	lcore_main(cls_app);

	return 0;
}