/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ip.h>
#include <rte_arp.h>
#include <rte_spinlock.h>

#include <cmdline_rdline.h>
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
#include <cmdline_parse_ipaddr.h>
#include <cmdline_parse_etheraddr.h>
#include <cmdline_socket.h>
#include <cmdline.h>

#include <rte_devargs.h>

#include "rte_byteorder.h"
#include "rte_cpuflags.h"
#include "rte_eth_bond.h"
#define RTE_LOGTYPE_DCB RTE_LOGTYPE_USER1

#define NB_MBUF (1024*8)

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define BURST_RX_INTERVAL_NS (10) /* RX poll interval, in nanoseconds */
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default value of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default value of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default value of RX write-back threshold reg. */
#define RX_FTHRESH (MAX_PKT_BURST * 2) /**< Default value of RX free threshold reg. */
/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default value of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default value of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default value of TX write-back threshold reg. */
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_RX_DESC_DEFAULT 1024
#define RTE_TX_DESC_DEFAULT 1024
/* not defined under linux */
#define NIPQUAD_FMT "%u.%u.%u.%u"

#define MAX_PORTS 4

#define PRINT_MAC(addr) printf("%02"PRIx8":%02"PRIx8":%02"PRIx8 \
		":%02"PRIx8":%02"PRIx8":%02"PRIx8, \
		addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], \
		addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5])
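/*
 * Bookkeeping for the bonded device: the IDs of the slave ports added to the
 * bond, how many of them there are, the port ID assigned to the bonded device
 * itself (set once it is created), and the mbuf pool shared by all ports.
 */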
uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t slaves_count;

static uint16_t BOND_PORT = 0xffff;

static struct rte_mempool *mbuf_pool;
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_NONE,
		.max_rx_pkt_len = ETHER_MAX_LEN,
	},
	.rx_adv_conf = {
		.rss_conf = { .rss_hf = ETH_RSS_IP, },
	},
	.txmode = { .mq_mode = ETH_MQ_TX_NONE, },
};
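/*
 * Bring up one slave port: apply the port configuration (trimming the
 * requested RSS hash functions and TX offloads to what the device reports it
 * supports), set up a single RX queue and a single TX queue, and start the
 * port.
 */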
static void
slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
{
	int retval;
	uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
	uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_conf local_port_conf = port_conf;

	if (!rte_eth_dev_is_valid_port(portid))
		rte_exit(EXIT_FAILURE, "Invalid port\n");

	rte_eth_dev_info_get(portid, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support, "
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			portid,
			port_conf.rx_adv_conf.rss_conf.rss_hf,
			local_port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
				portid, retval);

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
				"failed (res=%d)\n", portid, retval);

	/* RX queue setup */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = local_port_conf.rxmode.offloads;
	retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					rte_eth_dev_socket_id(portid),
					&rxq_conf, mbuf_pool);
	if (retval < 0)
		rte_exit(retval, "port %u: RX queue 0 setup failed (res=%d)",
				portid, retval);

	/* TX queue setup */
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = local_port_conf.txmode.offloads;
	retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
				rte_eth_dev_socket_id(portid), &txq_conf);
	if (retval < 0)
		rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
				portid, retval);

	retval = rte_eth_dev_start(portid);
	if (retval < 0)
		rte_exit(retval, "Start port %d failed (res=%d)",
				portid, retval);

	struct ether_addr addr;

	rte_eth_macaddr_get(portid, &addr);
	printf("Port %u MAC: ", portid);
	PRINT_MAC(addr);
	printf("\n");
}
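/*
 * Create the bonded device in mode 6 (adaptive load balancing), add every
 * initialized slave port to it, give it a single RX/TX queue pair, start it,
 * and wait until the slaves report active. The bonded port is also put into
 * promiscuous mode so it sees the ARP/IPv4 traffic handled by lcore_main.
 */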
static void
bond_port_init(struct rte_mempool *mbuf_pool)
{
	int retval;
	uint8_t i;
	uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
	uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_conf local_port_conf = port_conf;
	uint16_t wait_counter = 20;

	retval = rte_eth_bond_create("net_bonding0", BONDING_MODE_ALB,
			0 /*SOCKET_ID_ANY*/);
	if (retval < 0)
		rte_exit(EXIT_FAILURE,
				"Failed to create bond port\n");

	BOND_PORT = retval;

	rte_eth_dev_info_get(BOND_PORT, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
				BOND_PORT, retval);

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(BOND_PORT, &nb_rxd, &nb_txd);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
				"failed (res=%d)\n", BOND_PORT, retval);

	for (i = 0; i < slaves_count; i++) {
		if (rte_eth_bond_slave_add(BOND_PORT, slaves[i]) == -1)
			rte_exit(-1, "Adding slave (%u) to bond (%u) failed\n",
					slaves[i], BOND_PORT);
	}

	/* RX queue setup */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = local_port_conf.rxmode.offloads;
	retval = rte_eth_rx_queue_setup(BOND_PORT, 0, nb_rxd,
					rte_eth_dev_socket_id(BOND_PORT),
					&rxq_conf, mbuf_pool);
	if (retval < 0)
		rte_exit(retval, "port %u: RX queue 0 setup failed (res=%d)",
				BOND_PORT, retval);

	/* TX queue setup */
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = local_port_conf.txmode.offloads;
	retval = rte_eth_tx_queue_setup(BOND_PORT, 0, nb_txd,
				rte_eth_dev_socket_id(BOND_PORT), &txq_conf);
	if (retval < 0)
		rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
				BOND_PORT, retval);

	retval = rte_eth_dev_start(BOND_PORT);
	if (retval < 0)
		rte_exit(retval, "Start port %d failed (res=%d)", BOND_PORT, retval);

	printf("Waiting for slaves to become active...");
	while (wait_counter) {
		uint16_t act_slaves[16] = {0};
		if (rte_eth_bond_active_slaves_get(BOND_PORT, act_slaves, 16) ==
				slaves_count) {
			printf("\n");
			break;
		}
		sleep(1);
		printf("...");
		if (--wait_counter == 0)
			rte_exit(-1, "\nFailed to activate slaves\n");
	}

	rte_eth_promiscuous_enable(BOND_PORT);

	struct ether_addr addr;

	rte_eth_macaddr_get(BOND_PORT, &addr);
	printf("Port %u MAC: ", (unsigned)BOND_PORT);
	PRINT_MAC(addr);
	printf("\n");
}
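/*
 * Return the number of bytes occupied by VLAN tags (if any) following the
 * Ethernet header, and advance *proto to the encapsulated EtherType. Handles
 * a single tag and a double (QinQ) tag.
 */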
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;

			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct vlan_hdr);
		}
	}
	return vlan_offset;
}
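/*
 * State shared between the command-line thread and the ARP-responder lcore:
 * whether lcore_main is running and on which core, per-type packet counters
 * (total, ARP, IPv4), and the spinlock that protects them.
 */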
struct global_flag_stru_t {
	int LcoreMainIsRunning;
	int LcoreMainCore;
	uint32_t port_packets[4];
	rte_spinlock_t lock;
};
struct global_flag_stru_t global_flag_stru;
struct global_flag_stru_t *global_flag_stru_p = &global_flag_stru;
/*
 * Main worker: polls the bonded port, answers ARP requests addressed to the
 * bond IP, and bounces IPv4 packets destined to it back to their sender.
 */
static int lcore_main(__attribute__((unused)) void *arg1)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST] __rte_cache_aligned;
	struct ether_addr d_addr;

	struct ether_hdr *eth_hdr;
	struct arp_hdr *arp_hdr;
	struct ipv4_hdr *ipv4_hdr;
	uint16_t ether_type, offset;

	uint16_t rx_cnt;
	uint32_t bond_ip;
	int i;
	uint8_t is_free;

	bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
			(BOND_IP_3 << 16) | (BOND_IP_4 << 24);

	rte_spinlock_trylock(&global_flag_stru_p->lock);

	while (global_flag_stru_p->LcoreMainIsRunning) {
		rte_spinlock_unlock(&global_flag_stru_p->lock);
		rx_cnt = rte_eth_rx_burst(BOND_PORT, 0, pkts, MAX_PKT_BURST);

		/* If we didn't receive any packets, wait and go to the next iteration */
		if (rx_cnt == 0) {
			rte_delay_us(50);
			continue;
		}

		/* Search incoming data for ARP packets and prepare response */
		for (i = 0; i < rx_cnt; i++) {
			is_free = 0;
			if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
				global_flag_stru_p->port_packets[0]++;
				rte_spinlock_unlock(&global_flag_stru_p->lock);
			}
			eth_hdr = rte_pktmbuf_mtod(pkts[i], struct ether_hdr *);
			ether_type = eth_hdr->ether_type;
			if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))
				printf("VLAN tagged frame, offset:");
			offset = get_vlan_offset(eth_hdr, &ether_type);
			if (offset > 0)
				printf("%d\n", offset);
			if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
				if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
					global_flag_stru_p->port_packets[1]++;
					rte_spinlock_unlock(&global_flag_stru_p->lock);
				}
				arp_hdr = (struct arp_hdr *)((char *)(eth_hdr + 1) + offset);
				if (arp_hdr->arp_data.arp_tip == bond_ip) {
					if (arp_hdr->arp_op == rte_cpu_to_be_16(ARP_OP_REQUEST)) {
						arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
						/* Switch src and dst data and set bonding MAC */
						ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
						rte_eth_macaddr_get(BOND_PORT, &eth_hdr->s_addr);
						ether_addr_copy(&arp_hdr->arp_data.arp_sha, &arp_hdr->arp_data.arp_tha);
						arp_hdr->arp_data.arp_tip = arp_hdr->arp_data.arp_sip;
						rte_eth_macaddr_get(BOND_PORT, &d_addr);
						ether_addr_copy(&d_addr, &arp_hdr->arp_data.arp_sha);
						arp_hdr->arp_data.arp_sip = bond_ip;
						/* On success the PMD owns the mbuf; do not free it here */
						if (rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1) == 1)
							is_free = 1;
					} else {
						rte_eth_tx_burst(BOND_PORT, 0, NULL, 0);
					}
				}
			} else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
				if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
					global_flag_stru_p->port_packets[2]++;
					rte_spinlock_unlock(&global_flag_stru_p->lock);
				}
				ipv4_hdr = (struct ipv4_hdr *)((char *)(eth_hdr + 1) + offset);
				if (ipv4_hdr->dst_addr == bond_ip) {
					ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
					rte_eth_macaddr_get(BOND_PORT, &eth_hdr->s_addr);
					ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
					ipv4_hdr->src_addr = bond_ip;
					/* On success the PMD owns the mbuf; do not free it here */
					if (rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1) == 1)
						is_free = 1;
				}
			}

			/* Free packets that were not handed to the PMD */
			if (is_free == 0)
				rte_pktmbuf_free(pkts[i]);
		}
		rte_spinlock_trylock(&global_flag_stru_p->lock);
	}
	rte_spinlock_unlock(&global_flag_stru_p->lock);
	printf("BYE lcore_main\n");
	return 0;
}
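/*
 * "send <ip>" command: builds a broadcast ARP request for the given IPv4
 * address, with the bond's MAC/IP as the sender, and transmits it on the
 * bonded port. Example at the prompt (the address is just an illustration):
 *
 *   bond6> send 192.168.1.1
 */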
struct cmd_obj_send_result {
	cmdline_fixed_string_t action;
	cmdline_ipaddr_t ip;
};

static inline void get_string(struct cmd_obj_send_result *res, char *buf, uint8_t size)
{
	snprintf(buf, size, NIPQUAD_FMT,
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[0]),
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[1]),
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[2]),
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[3]));
}

static void cmd_obj_send_parsed(void *parsed_result,
		struct cmdline *cl,
		__attribute__((unused)) void *data)
{
	struct cmd_obj_send_result *res = parsed_result;
	char ip_str[INET6_ADDRSTRLEN];

	struct rte_mbuf *created_pkt;
	struct ether_hdr *eth_hdr;
	struct arp_hdr *arp_hdr;

	uint32_t bond_ip;
	size_t pkt_size;

	if (res->ip.family == AF_INET) {
		get_string(res, ip_str, INET_ADDRSTRLEN);
	} else {
		cmdline_printf(cl, "Wrong IP format. Only IPv4 is supported\n");
		return;
	}

	bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
			(BOND_IP_3 << 16) | (BOND_IP_4 << 24);

	created_pkt = rte_pktmbuf_alloc(mbuf_pool);
	if (created_pkt == NULL) {
		cmdline_printf(cl, "Failed to allocate mbuf\n");
		return;
	}

	pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr);
	created_pkt->data_len = pkt_size;
	created_pkt->pkt_len = pkt_size;

	eth_hdr = rte_pktmbuf_mtod(created_pkt, struct ether_hdr *);
	rte_eth_macaddr_get(BOND_PORT, &eth_hdr->s_addr);
	memset(&eth_hdr->d_addr, 0xFF, ETHER_ADDR_LEN);
	eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);

	arp_hdr = (struct arp_hdr *)((char *)eth_hdr + sizeof(struct ether_hdr));
	arp_hdr->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
	arp_hdr->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	arp_hdr->arp_hln = ETHER_ADDR_LEN;
	arp_hdr->arp_pln = sizeof(uint32_t);
	arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REQUEST);

	rte_eth_macaddr_get(BOND_PORT, &arp_hdr->arp_data.arp_sha);
	arp_hdr->arp_data.arp_sip = bond_ip;
	memset(&arp_hdr->arp_data.arp_tha, 0, ETHER_ADDR_LEN);
	arp_hdr->arp_data.arp_tip =
			((unsigned char *)&res->ip.addr.ipv4)[0] |
			(((unsigned char *)&res->ip.addr.ipv4)[1] << 8) |
			(((unsigned char *)&res->ip.addr.ipv4)[2] << 16) |
			(((unsigned char *)&res->ip.addr.ipv4)[3] << 24);
	rte_eth_tx_burst(BOND_PORT, 0, &created_pkt, 1);

	cmdline_printf(cl, "\n");
}
cmdline_parse_token_string_t cmd_obj_action_send =
	TOKEN_STRING_INITIALIZER(struct cmd_obj_send_result, action, "send");
cmdline_parse_token_ipaddr_t cmd_obj_ip =
	TOKEN_IPV4_INITIALIZER(struct cmd_obj_send_result, ip);

cmdline_parse_inst_t cmd_obj_send = {
	.f = cmd_obj_send_parsed,  /* function to call */
	.data = NULL,              /* 2nd arg of func */
	.help_str = "send client_ip",
	.tokens = {                /* token list, NULL terminated */
		(void *)&cmd_obj_action_send,
		(void *)&cmd_obj_ip,
		NULL,
	},
};
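/*
 * "start" command: if lcore_main is not already running, pick the first
 * available slave lcore and launch the ARP-responder loop there.
 */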
struct cmd_start_result {
	cmdline_fixed_string_t start;
};

static void cmd_start_parsed(__attribute__((unused)) void *parsed_result,
			     struct cmdline *cl,
			     __attribute__((unused)) void *data)
{
	int slave_core_id = rte_lcore_id();

	rte_spinlock_trylock(&global_flag_stru_p->lock);
	if (global_flag_stru_p->LcoreMainIsRunning == 0) {
		if (lcore_config[global_flag_stru_p->LcoreMainCore].state != WAIT) {
			rte_spinlock_unlock(&global_flag_stru_p->lock);
			return;
		}
		rte_spinlock_unlock(&global_flag_stru_p->lock);
	} else {
		cmdline_printf(cl, "lcore_main already running on core:%d\n",
				global_flag_stru_p->LcoreMainCore);
		rte_spinlock_unlock(&global_flag_stru_p->lock);
		return;
	}

	/* start lcore_main on a core != master core - ARP response thread */
	slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
	if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
		return;

	rte_spinlock_trylock(&global_flag_stru_p->lock);
	global_flag_stru_p->LcoreMainIsRunning = 1;
	global_flag_stru_p->LcoreMainCore = slave_core_id;
	rte_spinlock_unlock(&global_flag_stru_p->lock);
	cmdline_printf(cl,
			"Starting lcore_main on core %d:%d "
			"Our IP:%d.%d.%d.%d\n",
			slave_core_id,
			rte_eal_remote_launch(lcore_main, NULL, slave_core_id),
			BOND_IP_1, BOND_IP_2, BOND_IP_3, BOND_IP_4);
}

cmdline_parse_token_string_t cmd_start_start =
	TOKEN_STRING_INITIALIZER(struct cmd_start_result, start, "start");

cmdline_parse_inst_t cmd_start = {
	.f = cmd_start_parsed,  /* function to call */
	.data = NULL,           /* 2nd arg of func */
	.help_str = "starts listening if not started at startup",
	.tokens = {             /* token list, NULL terminated */
		(void *)&cmd_start_start,
		NULL,
	},
};
struct cmd_help_result {
	cmdline_fixed_string_t help;
};

static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
			    struct cmdline *cl,
			    __attribute__((unused)) void *data)
{
	cmdline_printf(cl,
			"ALB - link bonding mode 6 example\n"
			"send IP - sends one ARP request for IP through the bonded port.\n"
			"start   - starts listening for ARP requests.\n"
			"stop    - stops lcore_main.\n"
			"show    - shows bond info, e.g. active slaves and packet counters.\n"
			"help    - prints help.\n"
			"quit    - terminates all threads and quits.\n");
}

cmdline_parse_token_string_t cmd_help_help =
	TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help");

cmdline_parse_inst_t cmd_help = {
	.f = cmd_help_parsed,  /* function to call */
	.data = NULL,          /* 2nd arg of func */
	.help_str = "show help",
	.tokens = {            /* token list, NULL terminated */
		(void *)&cmd_help_help,
		NULL,
	},
};
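/*
 * "stop" command: clears the running flag under the lock and waits for the
 * ARP-responder lcore to return.
 */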
struct cmd_stop_result {
	cmdline_fixed_string_t stop;
};

static void cmd_stop_parsed(__attribute__((unused)) void *parsed_result,
			    struct cmdline *cl,
			    __attribute__((unused)) void *data)
{
	rte_spinlock_trylock(&global_flag_stru_p->lock);
	if (global_flag_stru_p->LcoreMainIsRunning == 0) {
		cmdline_printf(cl,
				"lcore_main not running on core:%d\n",
				global_flag_stru_p->LcoreMainCore);
		rte_spinlock_unlock(&global_flag_stru_p->lock);
		return;
	}
	global_flag_stru_p->LcoreMainIsRunning = 0;
	if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0)
		cmdline_printf(cl,
				"error: lcore_main can not stop on core:%d\n",
				global_flag_stru_p->LcoreMainCore);
	else
		cmdline_printf(cl,
				"lcore_main stopped on core:%d\n",
				global_flag_stru_p->LcoreMainCore);
	rte_spinlock_unlock(&global_flag_stru_p->lock);
}

cmdline_parse_token_string_t cmd_stop_stop =
	TOKEN_STRING_INITIALIZER(struct cmd_stop_result, stop, "stop");

cmdline_parse_inst_t cmd_stop = {
	.f = cmd_stop_parsed,  /* function to call */
	.data = NULL,          /* 2nd arg of func */
	.help_str = "this command does not take any arguments",
	.tokens = {            /* token list, NULL terminated */
		(void *)&cmd_stop_stop,
		NULL,
	},
};
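/*
 * "quit" command: stops lcore_main (if it is running) exactly like "stop",
 * then asks the command line to exit so main() can return.
 */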
struct cmd_quit_result {
	cmdline_fixed_string_t quit;
};

static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
			    struct cmdline *cl,
			    __attribute__((unused)) void *data)
{
	rte_spinlock_trylock(&global_flag_stru_p->lock);
	if (global_flag_stru_p->LcoreMainIsRunning == 0) {
		cmdline_printf(cl,
				"lcore_main not running on core:%d\n",
				global_flag_stru_p->LcoreMainCore);
		rte_spinlock_unlock(&global_flag_stru_p->lock);
		cmdline_quit(cl);
		return;
	}
	global_flag_stru_p->LcoreMainIsRunning = 0;
	if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0)
		cmdline_printf(cl,
				"error: lcore_main can not stop on core:%d\n",
				global_flag_stru_p->LcoreMainCore);
	else
		cmdline_printf(cl,
				"lcore_main stopped on core:%d\n",
				global_flag_stru_p->LcoreMainCore);
	rte_spinlock_unlock(&global_flag_stru_p->lock);
	cmdline_quit(cl);
}

cmdline_parse_token_string_t cmd_quit_quit =
	TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");

cmdline_parse_inst_t cmd_quit = {
	.f = cmd_quit_parsed,  /* function to call */
	.data = NULL,          /* 2nd arg of func */
	.help_str = "this command does not take any arguments",
	.tokens = {            /* token list, NULL terminated */
		(void *)&cmd_quit_quit,
		NULL,
	},
};
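/*
 * "show" command: prints the MAC address of each slave port, the number of
 * currently active slaves, and the per-type packet counters collected by
 * lcore_main.
 */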
struct cmd_show_result {
	cmdline_fixed_string_t show;
};

static void cmd_show_parsed(__attribute__((unused)) void *parsed_result,
			    struct cmdline *cl,
			    __attribute__((unused)) void *data)
{
	uint16_t slaves[16] = {0};
	uint8_t len = 16;
	struct ether_addr addr;
	uint16_t i = 0;

	while (i < slaves_count) {
		rte_eth_macaddr_get(i, &addr);
		PRINT_MAC(addr);
		printf("\n");
		i++;
	}

	rte_spinlock_trylock(&global_flag_stru_p->lock);
	cmdline_printf(cl,
			"Active_slaves:%d "
			"packets received:Tot:%d Arp:%d IPv4:%d\n",
			rte_eth_bond_active_slaves_get(BOND_PORT, slaves, len),
			global_flag_stru_p->port_packets[0],
			global_flag_stru_p->port_packets[1],
			global_flag_stru_p->port_packets[2]);
	rte_spinlock_unlock(&global_flag_stru_p->lock);
}

cmdline_parse_token_string_t cmd_show_show =
	TOKEN_STRING_INITIALIZER(struct cmd_show_result, show, "show");

cmdline_parse_inst_t cmd_show = {
	.f = cmd_show_parsed,  /* function to call */
	.data = NULL,          /* 2nd arg of func */
	.help_str = "this command does not take any arguments",
	.tokens = {            /* token list, NULL terminated */
		(void *)&cmd_show_show,
		NULL,
	},
};
/****** CONTEXT (list of commands) */

cmdline_parse_ctx_t main_ctx[] = {
	(cmdline_parse_inst_t *)&cmd_start,
	(cmdline_parse_inst_t *)&cmd_obj_send,
	(cmdline_parse_inst_t *)&cmd_stop,
	(cmdline_parse_inst_t *)&cmd_show,
	(cmdline_parse_inst_t *)&cmd_quit,
	(cmdline_parse_inst_t *)&cmd_help,
	NULL,
};
/* prompt function, called from main on the MASTER lcore */
static void prompt(__attribute__((unused)) void *arg1)
{
	struct cmdline *cl;
	cl = cmdline_stdin_new(main_ctx, "bond6>");
	if (cl != NULL) {
		cmdline_interact(cl);
		cmdline_stdin_exit(cl);
	}
}
/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	int ret;
	uint16_t nb_ports, i;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	rte_devargs_dump(stdout);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "Give at least one port\n");
	else if (nb_ports > MAX_PORTS)
		rte_exit(EXIT_FAILURE, "You can have max 4 ports\n");

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NB_MBUF, 32,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	slaves_count = nb_ports;
	RTE_ETH_FOREACH_DEV(i) {
		slave_port_init(i, mbuf_pool);
		slaves[i] = i;
	}

	bond_port_init(mbuf_pool);

	rte_spinlock_init(&global_flag_stru_p->lock);
	int slave_core_id = rte_lcore_id();

	/* check state of lcores */
	RTE_LCORE_FOREACH_SLAVE(slave_core_id) {
		if (lcore_config[slave_core_id].state != WAIT)
			return -EBUSY;
	}

	/* start lcore_main on a core != master core - ARP response thread */
	slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
	if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
		return -EPERM;

	global_flag_stru_p->LcoreMainIsRunning = 1;
	global_flag_stru_p->LcoreMainCore = slave_core_id;
	printf("Starting lcore_main on core %d:%d Our IP:%d.%d.%d.%d\n",
			slave_core_id,
			rte_eal_remote_launch((lcore_function_t *)lcore_main,
					NULL, slave_core_id),
			BOND_IP_1, BOND_IP_2, BOND_IP_3, BOND_IP_4);

	/* Start prompt for user interaction */
	prompt(NULL);

	return 0;
}
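/*
 * Illustrative usage (the binary name and EAL options below are examples
 * only - adjust cores, memory channels and port selection to your system):
 *
 *   ./bond_app -l 0-3 -n 4
 *
 * Then, at the "bond6>" prompt:
 *
 *   bond6> show              (list slave MACs, active slaves, packet counters)
 *   bond6> send 192.168.1.1  (transmit an ARP request for an example address)
 *   bond6> quit
 */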