.. SPDX-License-Identifier: BSD-3-Clause
   Copyright(c) 2017 Intel Corporation.

Flow Classify Sample Application
================================

The Flow Classify sample application is based on the simple *skeleton* example
of a forwarding application.

It is intended as a demonstration of the basic components of a DPDK forwarding
application which uses the Flow Classify library APIs.

See :doc:`../prog_guide/flow_classify_lib` for more information on the
Flow Classify library.

Compiling the Application
-------------------------

To compile the sample application see :doc:`compiling`.

The application is located in the ``flow_classify`` sub-directory.

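For a conventional make-based build of this DPDK generation the steps look
roughly as follows; the SDK path and target name are placeholders, not values
taken from the sample:

.. code-block:: console

    export RTE_SDK=/path/to/dpdk
    export RTE_TARGET=x86_64-native-linuxapp-gcc
    cd ${RTE_SDK}/examples/flow_classify
    make
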

Running the Application
-----------------------

To run the example in a ``linuxapp`` environment:

.. code-block:: console

    cd ~/dpdk/examples/flow_classify
    ./build/flow_classify -c 4 -n 4 -- --rule_ipv4="../ipv4_rules_file.txt"

Please refer to the *DPDK Getting Started Guide*, section
:doc:`../linux_gsg/build_sample_apps`
for general information on running applications and the Environment
Abstraction Layer (EAL) options.


Sample ipv4_rules_file.txt
--------------------------

.. code-block:: console

    #src_ip/masklen dst_ip/masklen src_port : mask dst_port : mask proto/mask priority

    2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
    9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 17/0xff 1
    9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 6/0xff 2
    9.9.8.3/24 9.9.8.7/24 32 : 0xffff 33 : 0xffff 6/0xff 3
    6.7.8.9/24 2.3.4.5/24 32 : 0x0000 33 : 0x0000 132/0xff 4


Explanation
-----------

The following sections provide an explanation of the main components of the
code.

All DPDK library functions used in the sample code are prefixed with ``rte_``
and are explained in detail in the *DPDK API Documentation*.


ACL field definitions for the IPv4 5 tuple rule
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The following field definitions are used when creating the ACL table during
initialisation of the ``Flow Classify`` application.

.. code-block:: c

    static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
        /* first input field - always one byte long. */
        {
            .type = RTE_ACL_FIELD_TYPE_BITMASK,
            .size = sizeof(uint8_t),
            .field_index = PROTO_FIELD_IPV4,
            .input_index = PROTO_INPUT_IPV4,
            .offset = sizeof(struct ether_hdr) +
                offsetof(struct ipv4_hdr, next_proto_id),
        },
        /* next input field (IPv4 source address) - 4 consecutive bytes. */
        {
            /* rte_flow uses a bit mask for IPv4 addresses */
            .type = RTE_ACL_FIELD_TYPE_BITMASK,
            .size = sizeof(uint32_t),
            .field_index = SRC_FIELD_IPV4,
            .input_index = SRC_INPUT_IPV4,
            .offset = sizeof(struct ether_hdr) +
                offsetof(struct ipv4_hdr, src_addr),
        },
        /* next input field (IPv4 destination address) - 4 consecutive bytes. */
        {
            /* rte_flow uses a bit mask for IPv4 addresses */
            .type = RTE_ACL_FIELD_TYPE_BITMASK,
            .size = sizeof(uint32_t),
            .field_index = DST_FIELD_IPV4,
            .input_index = DST_INPUT_IPV4,
            .offset = sizeof(struct ether_hdr) +
                offsetof(struct ipv4_hdr, dst_addr),
        },
        /*
         * Next 2 fields (src & dst ports) form 4 consecutive bytes.
         * They share the same input index.
         */
        {
            /* rte_flow uses a bit mask for protocol ports */
            .type = RTE_ACL_FIELD_TYPE_BITMASK,
            .size = sizeof(uint16_t),
            .field_index = SRCP_FIELD_IPV4,
            .input_index = SRCP_DESTP_INPUT_IPV4,
            .offset = sizeof(struct ether_hdr) +
                sizeof(struct ipv4_hdr) +
                offsetof(struct tcp_hdr, src_port),
        },
        {
            /* rte_flow uses a bit mask for protocol ports */
            .type = RTE_ACL_FIELD_TYPE_BITMASK,
            .size = sizeof(uint16_t),
            .field_index = DSTP_FIELD_IPV4,
            .input_index = SRCP_DESTP_INPUT_IPV4,
            .offset = sizeof(struct ether_hdr) +
                sizeof(struct ipv4_hdr) +
                offsetof(struct tcp_hdr, dst_port),
        },
    };

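The field and input index constants referenced above (``PROTO_FIELD_IPV4``,
``SRCP_DESTP_INPUT_IPV4`` and so on) are small enums defined alongside the
table. The sketch below shows how such definitions are typically laid out;
treat it as illustrative rather than a verbatim copy of the sample:

.. code-block:: c

    /* Sketch of the index constants used by ipv4_defs[] above;
     * the exact declarations in the sample source may differ. */
    enum {
        PROTO_FIELD_IPV4,
        SRC_FIELD_IPV4,
        DST_FIELD_IPV4,
        SRCP_FIELD_IPV4,
        DSTP_FIELD_IPV4,
        NUM_FIELDS_IPV4
    };

    enum {
        PROTO_INPUT_IPV4,
        SRC_INPUT_IPV4,
        DST_INPUT_IPV4,
        SRCP_DESTP_INPUT_IPV4
    };
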

The Main Function
~~~~~~~~~~~~~~~~~

The ``main()`` function performs the initialization and calls the execution
threads for each lcore.

The first task is to initialize the Environment Abstraction Layer (EAL).
The ``argc`` and ``argv`` arguments are provided to the ``rte_eal_init()``
function. The value returned is the number of parsed arguments:

.. code-block:: c

    int ret = rte_eal_init(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

It then parses the flow_classify application arguments:

.. code-block:: c

    ret = parse_args(argc, argv);
    if (ret < 0)
        rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");

The ``main()`` function also allocates a mempool to hold the mbufs
(Message Buffers) used by the application:

.. code-block:: c

    mbuf_pool = rte_mempool_create("MBUF_POOL",
                                   NUM_MBUFS * nb_ports,
                                   MBUF_SIZE,
                                   MBUF_CACHE_SIZE,
                                   sizeof(struct rte_pktmbuf_pool_private),
                                   rte_pktmbuf_pool_init, NULL,
                                   rte_pktmbuf_init,      NULL,
                                   rte_socket_id(),
                                   0);

The mbufs are the packet buffer structure used by DPDK. They are explained in
detail in the "Mbuf Library" section of the *DPDK Programmer's Guide*.

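The pool sizing macros passed to ``rte_mempool_create()`` above are defined
near the top of the sample. The values below are illustrative assumptions in
the style of the other DPDK examples, not necessarily the exact figures used
by flow_classify:

.. code-block:: c

    /* Illustrative pool sizing; the values in the sample source may differ. */
    #define NUM_MBUFS       8191
    #define MBUF_CACHE_SIZE 250
    #define MBUF_SIZE       (2048 + sizeof(struct rte_mbuf) + \
                             RTE_PKTMBUF_HEADROOM)
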
The ``main()`` function also initializes all the ports using the user defined
``port_init()`` function which is explained in the next section:

.. code-block:: c

    RTE_ETH_FOREACH_DEV(portid) {
        if (port_init(portid, mbuf_pool) != 0) {
            rte_exit(EXIT_FAILURE,
                     "Cannot init port %" PRIu8 "\n", portid);
        }
    }

The ``main()`` function creates the ``flow classifier object`` and adds an
``ACL table`` to the flow classifier.

.. code-block:: c

    struct flow_classifier {
        struct rte_flow_classifier *cls;
    };

    struct flow_classifier_acl {
        struct flow_classifier cls;
    } __rte_cache_aligned;

    /* Memory allocation */
    size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
    cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
    if (cls_app == NULL)
        rte_exit(EXIT_FAILURE, "Cannot allocate classifier memory\n");

    cls_params.name = "flow_classifier";
    cls_params.socket_id = socket_id;

    cls_app->cls = rte_flow_classifier_create(&cls_params);
    if (cls_app->cls == NULL) {
        rte_free(cls_app);
        rte_exit(EXIT_FAILURE, "Cannot create classifier\n");
    }

    /* initialise ACL table params */
    table_acl_params.name = "table_acl_ipv4_5tuple";
    table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
    table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
    memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));

    /* initialise table create params */
    cls_table_params.ops = &rte_table_acl_ops,
    cls_table_params.arg_create = &table_acl_params,
    cls_table_params.type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE;

    ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params);
    if (ret) {
        rte_flow_classifier_free(cls_app->cls);
        rte_free(cls_app);
        rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
    }

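The snippet above also assumes a few local variables and parameter structures
declared earlier in ``main()``. A plausible sketch of those declarations is
shown below; the initial value of ``socket_id`` and the exact declaration
style are assumptions:

.. code-block:: c

    /* Sketch of the variables assumed by the snippet above. */
    struct rte_flow_classifier_params cls_params;
    struct rte_table_acl_params table_acl_params;
    struct rte_flow_classify_table_params cls_table_params;
    struct flow_classifier_acl *cls_app = NULL;
    int socket_id = 0;  /* NUMA socket on which to create the classifier */
    uint32_t size;
    int ret;
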
It then reads the ``ipv4_rules_file.txt`` file and initialises the parameters for
the ``rte_flow_classify_table_entry_add`` API.
This API adds a rule to the ACL table.

.. code-block:: c

    if (add_rules(parm_config.rule_ipv4_name)) {
        rte_flow_classifier_free(cls_app->cls);
        rte_free(cls_app);
        rte_exit(EXIT_FAILURE, "Failed to add rules\n");
    }

Once the initialization is complete, the application is ready to launch a
function on an lcore. In this example ``lcore_main()`` is called on a single
lcore.

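In this sample the launch is a plain function call on the current lcore,
passing the classifier object allocated earlier; a minimal sketch:

.. code-block:: c

    /* Run the receive/classify/forward loop on the current (main) lcore. */
    lcore_main(cls_app);
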
The ``lcore_main()`` function is explained below.


The Port Initialization Function
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The main functional part of the port initialization, reused from the Basic
Forwarding (skeleton) application, is shown below:

.. code-block:: c

    static inline int
    port_init(uint8_t port, struct rte_mempool *mbuf_pool)
    {
        struct rte_eth_conf port_conf = port_conf_default;
        const uint16_t rx_rings = 1, tx_rings = 1;
        struct ether_addr addr;
        int retval;
        uint16_t q;

        /* Configure the Ethernet device. */
        retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
        if (retval != 0)
            return retval;

        /* Allocate and set up 1 RX queue per Ethernet port. */
        for (q = 0; q < rx_rings; q++) {
            retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
                    rte_eth_dev_socket_id(port), NULL, mbuf_pool);
            if (retval < 0)
                return retval;
        }

        /* Allocate and set up 1 TX queue per Ethernet port. */
        for (q = 0; q < tx_rings; q++) {
            retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
                    rte_eth_dev_socket_id(port), NULL);
            if (retval < 0)
                return retval;
        }

        /* Start the Ethernet port. */
        retval = rte_eth_dev_start(port);
        if (retval < 0)
            return retval;

        /* Display the port MAC address. */
        rte_eth_macaddr_get(port, &addr);
        printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
               " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
               (unsigned)port,
               addr.addr_bytes[0], addr.addr_bytes[1],
               addr.addr_bytes[2], addr.addr_bytes[3],
               addr.addr_bytes[4], addr.addr_bytes[5]);

        /* Enable RX in promiscuous mode for the Ethernet device. */
        rte_eth_promiscuous_enable(port);

        return 0;
    }

The Ethernet ports are configured with default settings using the
``rte_eth_dev_configure()`` function and the ``port_conf_default`` struct.

.. code-block:: c

    static const struct rte_eth_conf port_conf_default = {
        .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
    };

For this example the ports are set up with 1 RX and 1 TX queue using the
``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.

The Ethernet port is then started:

.. code-block:: c

    retval = rte_eth_dev_start(port);

Finally the RX port is set in promiscuous mode:

.. code-block:: c

    rte_eth_promiscuous_enable(port);


The Add Rules function
~~~~~~~~~~~~~~~~~~~~~~

The ``add_rules`` function reads the ``ipv4_rules_file.txt`` file and calls the
``add_classify_rule`` function which calls the
``rte_flow_classify_table_entry_add`` API.

.. code-block:: c

    static int
    add_rules(const char *rule_path)
    {
        FILE *fh;
        char buff[LINE_MAX];
        unsigned int i = 0;
        unsigned int total_num = 0;
        struct rte_eth_ntuple_filter ntuple_filter;

        fh = fopen(rule_path, "rb");
        if (fh == NULL)
            rte_exit(EXIT_FAILURE, "%s: Open %s failed\n", __func__,
                rule_path);

        fseek(fh, 0, SEEK_SET);

        while (fgets(buff, LINE_MAX, fh) != NULL) {
            i++;

            if (is_bypass_line(buff))
                continue;

            if (total_num >= FLOW_CLASSIFY_MAX_RULE_NUM - 1) {
                printf("\nINFO: classify rule capacity %d reached\n",
                    total_num);
                break;
            }

            if (parse_ipv4_5tuple_rule(buff, &ntuple_filter) != 0)
                rte_exit(EXIT_FAILURE,
                    "%s Line %u: parse rules error\n",
                    rule_path, i);

            if (add_classify_rule(&ntuple_filter) != 0)
                rte_exit(EXIT_FAILURE, "add rule error\n");

            total_num++;
        }

        fclose(fh);
        return 0;
    }

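``add_classify_rule`` itself is not shown here. In outline it converts the
parsed ``rte_eth_ntuple_filter`` into generic ``rte_flow`` attribute, pattern
and action arrays and passes them to ``rte_flow_classify_table_entry_add``,
keeping the returned rule handle in the ``rules[]`` array polled by
``lcore_main``. The sketch below illustrates the idea for a UDP rule; it is an
assumption-laden outline (the helper name, the field wiring and the exact API
prototype should be checked against the installed DPDK release), not the
sample's code:

.. code-block:: c

    /* Illustrative only: add one UDP 5-tuple rule to the classifier. */
    static int
    add_udp_rule_sketch(struct flow_classifier *cls_app,
                        const struct rte_eth_ntuple_filter *ntuple)
    {
        struct rte_flow_item_ipv4 ipv4_spec = {
            .hdr = {
                .next_proto_id = ntuple->proto,
                .src_addr = ntuple->src_ip,
                .dst_addr = ntuple->dst_ip,
            },
        };
        struct rte_flow_item_udp udp_spec = {
            .hdr = {
                .src_port = ntuple->src_port,
                .dst_port = ntuple->dst_port,
            },
        };
        /* eth / ipv4 / udp / end (masks omitted for brevity) */
        struct rte_flow_item pattern[] = {
            { .type = RTE_FLOW_ITEM_TYPE_ETH },
            { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
            { .type = RTE_FLOW_ITEM_TYPE_UDP,  .spec = &udp_spec },
            { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        /* count / end, so rte_flow_classifier_query() can report hits */
        struct rte_flow_action actions[] = {
            { .type = RTE_FLOW_ACTION_TYPE_COUNT },
            { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = {
            .ingress = 1,
            .priority = ntuple->priority,
        };
        struct rte_flow_error error;
        int key_found;

        rules[num_classify_rules] = rte_flow_classify_table_entry_add(
                cls_app->cls, &attr, pattern, actions,
                &key_found, &error);
        if (rules[num_classify_rules] == NULL)
            return -1;
        num_classify_rules++;
        return 0;
    }
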

The Lcore Main function
~~~~~~~~~~~~~~~~~~~~~~~

As we saw above the ``main()`` function calls an application function on the
available lcores.
The ``lcore_main`` function calls the ``rte_flow_classifier_query`` API.
For this application the ``lcore_main`` function looks like the following:

.. code-block:: c

    /* flow classify data */
    static int num_classify_rules;
    static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
    static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
    static struct rte_flow_classify_stats classify_stats = {
            .stats = (void *)&ntuple_stats
    };

    static __attribute__((noreturn)) void
    lcore_main(struct flow_classifier *cls_app)
    {
        uint16_t port;
        int i;
        int ret;

        /*
         * Check that the port is on the same NUMA node as the polling thread
         * for best performance.
         */
        RTE_ETH_FOREACH_DEV(port)
            if (rte_eth_dev_socket_id(port) > 0 &&
                rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
                printf("WARNING: port %u is on remote NUMA node\n",
                       port);
                printf("to polling thread.\n");
                printf("Performance will not be optimal.\n");
            }
        printf("\nCore %u forwarding packets. \n",
               rte_lcore_id());
        printf("[Ctrl+C to quit]\n");

        /* Run until the application is quit or killed. */
        for (;;) {
            /*
             * Receive packets on a port and forward them on the paired
             * port. The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
             */
            RTE_ETH_FOREACH_DEV(port) {

                /* Get burst of RX packets, from first port of pair. */
                struct rte_mbuf *bufs[BURST_SIZE];
                const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
                        bufs, BURST_SIZE);

                if (unlikely(nb_rx == 0))
                    continue;

                for (i = 0; i < MAX_NUM_CLASSIFY; i++) {
                    if (rules[i]) {
                        ret = rte_flow_classifier_query(
                                cls_app->cls,
                                bufs, nb_rx, rules[i],
                                &classify_stats);
                        if (ret)
                            printf(
                                "rule [%d] query failed ret [%d]\n\n",
                                i, ret);
                        else {
                            printf(
                                "rule[%d] count=%"PRIu64"\n",
                                i, ntuple_stats.counter1);

                            printf("proto = %d\n",
                                ntuple_stats.ipv4_5tuple.proto);
                        }
                    }
                }

                /* Send burst of TX packets, to second port of pair. */
                const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
                        bufs, nb_rx);

                /* Free any unsent packets. */
                if (unlikely(nb_tx < nb_rx)) {
                    uint16_t buf;
                    for (buf = nb_tx; buf < nb_rx; buf++)
                        rte_pktmbuf_free(bufs[buf]);
                }
            }
        }
    }

The main work of the application is done within the loop:

.. code-block:: c

    for (;;) {
        RTE_ETH_FOREACH_DEV(port) {

            /* Get burst of RX packets, from first port of pair. */
            struct rte_mbuf *bufs[BURST_SIZE];
            const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
                    bufs, BURST_SIZE);

            if (unlikely(nb_rx == 0))
                continue;

            /* Send burst of TX packets, to second port of pair. */
            const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
                    bufs, nb_rx);

            /* Free any unsent packets. */
            if (unlikely(nb_tx < nb_rx)) {
                uint16_t buf;
                for (buf = nb_tx; buf < nb_rx; buf++)
                    rte_pktmbuf_free(bufs[buf]);
            }
        }
    }

Packets are received in bursts on the RX ports and transmitted in bursts on
the TX ports. The ports are grouped in pairs with a simple mapping scheme
using an XOR on the port number::

    0 -> 1
    1 -> 0

    2 -> 3
    3 -> 2

    etc.

The ``rte_eth_tx_burst()`` function frees the memory buffers of packets that
are transmitted. If packets fail to transmit, ``(nb_tx < nb_rx)``, then they
must be freed explicitly using ``rte_pktmbuf_free()``.

The forwarding loop can be interrupted and the application closed using
``Ctrl-C``.