1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
7 #include <sys/socket.h>
21 #include <rte_common.h>
23 #include <rte_memory.h>
24 #include <rte_memcpy.h>
26 #include <rte_launch.h>
27 #include <rte_atomic.h>
28 #include <rte_cycles.h>
29 #include <rte_prefetch.h>
30 #include <rte_lcore.h>
31 #include <rte_per_lcore.h>
32 #include <rte_branch_prediction.h>
33 #include <rte_interrupts.h>
34 #include <rte_random.h>
35 #include <rte_debug.h>
36 #include <rte_ether.h>
37 #include <rte_ethdev.h>
38 #include <rte_mempool.h>
43 #include <rte_spinlock.h>
45 #include <cmdline_rdline.h>
46 #include <cmdline_parse.h>
47 #include <cmdline_parse_num.h>
48 #include <cmdline_parse_string.h>
49 #include <cmdline_parse_ipaddr.h>
50 #include <cmdline_parse_etheraddr.h>
51 #include <cmdline_socket.h>
56 #include <rte_devargs.h>
59 #include "rte_byteorder.h"
60 #include "rte_cpuflags.h"
61 #include "rte_eth_bond.h"
/* Tuning constants and helper macros for the bonding (mode 6 / ALB) example. */
63 #define RTE_LOGTYPE_DCB RTE_LOGTYPE_USER1
/* Total mbufs in the packet pool (8192). */
65 #define NB_MBUF (1024*8)
/* Max packets handled per rte_eth_rx_burst()/rte_eth_tx_burst() call. */
67 #define MAX_PKT_BURST 32
68 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
69 #define BURST_RX_INTERVAL_NS (10) /* RX poll interval in ns -- NOTE(review): comment previously said "~100ns" which disagrees with the value 10; confirm intended units */
72 * RX and TX Prefetch, Host, and Write-back threshold values should be
73 * carefully set for optimal performance. Consult the network
74 * controller's datasheet and supporting DPDK documentation for guidance
75 * on how these parameters should be set.
77 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
78 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
79 #define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
80 #define RX_FTHRESH (MAX_PKT_BURST * 2)/**< Default values of RX free threshold reg. */
83 * These default values are optimized for use with the Intel(R) 82599 10 GbE
84 * Controller and the DPDK ixgbe PMD. Consider using other values for other
85 * network controllers and/or network drivers.
87 #define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
88 #define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
89 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
92 * Configurable number of RX/TX ring descriptors
94 #define RTE_RX_DESC_DEFAULT 1024
95 #define RTE_TX_DESC_DEFAULT 1024
102 /* not defined under linux */
/* printf format for a dotted-quad IPv4 address (Linux-kernel NIPQUAD style). */
104 #define NIPQUAD_FMT "%u.%u.%u.%u"
/* Print a struct ether_addr as colon-separated hex bytes; no trailing newline. */
108 #define PRINT_MAC(addr) printf("%02"PRIx8":%02"PRIx8":%02"PRIx8 \
109 ":%02"PRIx8":%02"PRIx8":%02"PRIx8, \
110 addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], \
111 addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5])
/* Port ids of the physical ports that get enslaved to the bonded device,
 * and how many of them are in use (set in main()). */
113 uint16_t slaves[RTE_MAX_ETHPORTS];
114 uint16_t slaves_count;
/* Port id of the bonded device; 0xffff (invalid) until bond_port_init()
 * creates it via rte_eth_bond_create(). */
116 static uint16_t BOND_PORT = 0xffff;
/* Shared mbuf pool, created in main() and used by all RX queues. */
118 static struct rte_mempool *mbuf_pool;
/* Template ethdev configuration; each port copies it and trims the RSS hash
 * mask to hardware capabilities in slave_port_init().
 * NOTE(review): this listing dropped the nesting braces for the .rxmode /
 * .rx_adv_conf.rss_conf / .txmode sub-structs -- initializer is fragmentary. */
120 static struct rte_eth_conf port_conf = {
122 .mq_mode = ETH_MQ_RX_NONE,
123 .max_rx_pkt_len = ETHER_MAX_LEN,
129 .rss_hf = ETH_RSS_IP,
133 .mq_mode = ETH_MQ_TX_NONE,
/* Configure and start one physical (slave) port: 1 RX + 1 TX queue, optional
 * MBUF_FAST_FREE TX offload, RSS hash mask trimmed to what the NIC supports.
 * Exits the whole program (rte_exit) on any failure; prints the port MAC.
 * NOTE(review): this listing is missing lines (return type, braces, the
 * `retval` declaration and several `if (retval != 0)` checks); comments below
 * describe only the visible code. */
138 slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
141 uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
142 uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
143 struct rte_eth_dev_info dev_info;
144 struct rte_eth_rxconf rxq_conf;
145 struct rte_eth_txconf txq_conf;
/* Work on a per-port copy so the shared template stays untouched. */
146 struct rte_eth_conf local_port_conf = port_conf;
148 if (!rte_eth_dev_is_valid_port(portid))
149 rte_exit(EXIT_FAILURE, "Invalid port\n");
151 rte_eth_dev_info_get(portid, &dev_info);
/* Enable fast mbuf free on TX only if the PMD advertises support. */
152 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
153 local_port_conf.txmode.offloads |=
154 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
/* Drop RSS hash types the hardware cannot do; warn if that changed anything. */
156 local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
157 dev_info.flow_type_rss_offloads;
158 if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
159 port_conf.rx_adv_conf.rss_conf.rss_hf) {
160 printf("Port %u modified RSS hash function based on hardware support,"
161 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
163 port_conf.rx_adv_conf.rss_conf.rss_hf,
164 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
/* One RX queue, one TX queue. */
167 retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
169 rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
/* Let the PMD clamp descriptor counts to its supported range. */
172 retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
174 rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
175 "failed (res=%d)\n", portid, retval);
/* RX queue 0 on the port's NUMA socket, fed from the shared mbuf pool. */
178 rxq_conf = dev_info.default_rxconf;
179 rxq_conf.offloads = local_port_conf.rxmode.offloads;
180 retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
181 rte_eth_dev_socket_id(portid),
185 rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",
/* TX queue 0, inheriting the (possibly extended) TX offloads. */
189 txq_conf = dev_info.default_txconf;
190 txq_conf.offloads = local_port_conf.txmode.offloads;
191 retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
192 rte_eth_dev_socket_id(portid), &txq_conf);
195 rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
198 retval = rte_eth_dev_start(portid);
201 "Start port %d failed (res=%d)",
/* Report the port's MAC address for the operator. */
204 struct ether_addr addr;
206 rte_eth_macaddr_get(portid, &addr);
207 printf("Port %u MAC: ", portid);
/* Create the bonded device in mode 6 (adaptive load balancing), enslave all
 * ports recorded in slaves[], set up its single RX/TX queue pair, start it,
 * wait (bounded) for slaves to become active, and enable promiscuous mode.
 * Exits the program on any failure.
 * NOTE(review): listing is missing lines (return type, braces, `retval`/`i`
 * declarations, BOND_PORT assignment from the create call, error checks). */
213 bond_port_init(struct rte_mempool *mbuf_pool)
217 uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
218 uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
219 struct rte_eth_dev_info dev_info;
220 struct rte_eth_rxconf rxq_conf;
221 struct rte_eth_txconf txq_conf;
222 struct rte_eth_conf local_port_conf = port_conf;
/* Up to 20 polls while waiting for slaves to activate (see loop below). */
223 uint16_t wait_counter = 20;
/* Create the bond PMD vdev; mode 6 = BONDING_MODE_ALB. */
225 retval = rte_eth_bond_create("net_bonding0", BONDING_MODE_ALB,
226 0 /*SOCKET_ID_ANY*/);
228 rte_exit(EXIT_FAILURE,
/* FIXME(review): typo in user-visible message -- "Faled" should be "Failed"
 * (string left untouched here; a code change is needed to fix it). */
229 "Faled to create bond port\n");
233 rte_eth_dev_info_get(BOND_PORT, &dev_info);
234 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
235 local_port_conf.txmode.offloads |=
236 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
237 retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
239 rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
242 retval = rte_eth_dev_adjust_nb_rx_tx_desc(BOND_PORT, &nb_rxd, &nb_txd);
244 rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
245 "failed (res=%d)\n", BOND_PORT, retval);
/* Enslave every physical port initialized earlier. */
247 for (i = 0; i < slaves_count; i++) {
248 if (rte_eth_bond_slave_add(BOND_PORT, slaves[i]) == -1)
249 rte_exit(-1, "Oooops! adding slave (%u) to bond (%u) failed!\n",
250 slaves[i], BOND_PORT);
/* Single RX/TX queue pair on the bond device itself. */
255 rxq_conf = dev_info.default_rxconf;
256 rxq_conf.offloads = local_port_conf.rxmode.offloads;
257 retval = rte_eth_rx_queue_setup(BOND_PORT, 0, nb_rxd,
258 rte_eth_dev_socket_id(BOND_PORT),
259 &rxq_conf, mbuf_pool);
261 rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",
265 txq_conf = dev_info.default_txconf;
266 txq_conf.offloads = local_port_conf.txmode.offloads;
267 retval = rte_eth_tx_queue_setup(BOND_PORT, 0, nb_txd,
268 rte_eth_dev_socket_id(BOND_PORT), &txq_conf);
271 rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
274 retval = rte_eth_dev_start(BOND_PORT);
276 rte_exit(retval, "Start port %d failed (res=%d)", BOND_PORT, retval);
/* Poll up to wait_counter times for at least one active slave; give up
 * and terminate if none activates. */
278 printf("Waiting for slaves to become active...");
279 while (wait_counter) {
280 uint16_t act_slaves[16] = {0};
281 if (rte_eth_bond_active_slaves_get(BOND_PORT, act_slaves, 16) ==
288 if (--wait_counter == 0)
289 rte_exit(-1, "\nFailed to activate slaves\n");
/* Promiscuous mode so the bond sees ARP/IP traffic not addressed to it. */
292 rte_eth_promiscuous_enable(BOND_PORT);
294 struct ether_addr addr;
296 rte_eth_macaddr_get(BOND_PORT, &addr);
297 printf("Port %u MAC: ", (unsigned)BOND_PORT);
/* Return the byte offset past any VLAN tag(s) following the Ethernet header
 * (handles up to two stacked tags, i.e. QinQ), and update *proto in place to
 * the inner EtherType. *proto is in network byte order on entry and exit.
 * NOTE(review): return type/statement and closing braces were dropped by the
 * extraction of this listing. */
303 get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
305 size_t vlan_offset = 0;
/* Outer tag? vlan_hdr sits immediately after the Ethernet header. */
307 if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
308 struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
310 vlan_offset = sizeof(struct vlan_hdr);
311 *proto = vlan_hdr->eth_proto;
/* Second (inner) tag for QinQ frames. */
313 if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
314 vlan_hdr = vlan_hdr + 1;
316 *proto = vlan_hdr->eth_proto;
317 vlan_offset += sizeof(struct vlan_hdr);
/* State shared between the cmdline thread and lcore_main, guarded by a
 * spinlock (the lock member is not visible in this listing -- extraction
 * dropped lines, likely including `LcoreMainCore` and `rte_spinlock_t lock`,
 * both referenced elsewhere in the file). */
323 struct global_flag_stru_t {
/* 1 while lcore_main should keep polling; 0 requests shutdown. */
324 int LcoreMainIsRunning;
/* Counters: [0] total RX, [1] ARP, [2] IPv4 (see lcore_main/cmd_show). */
326 uint32_t port_packets[4];
329 struct global_flag_stru_t global_flag_stru;
330 struct global_flag_stru_t *global_flag_stru_p = &global_flag_stru;
333 * Main thread that does the work, reading from INPUT_PORT
334 * and writing to OUTPUT_PORT
/* Worker loop run on a slave lcore: polls the bond port, answers ARP requests
 * for the bond IP, echoes IPv4 packets addressed to the bond IP back to their
 * sender, and counts packets under the shared spinlock.
 * NOTE(review): listing dropped lines (declarations of bond_ip/rx_cnt/i,
 * braces, the idle-wait branch); also `ð...` sequences below are
 * HTML-entity-mangled `&eth...` from the extraction, not real code. */
336 static int lcore_main(__attribute__((unused)) void *arg1)
338 struct rte_mbuf *pkts[MAX_PKT_BURST] __rte_cache_aligned;
339 struct ether_addr d_addr;
341 struct ether_hdr *eth_hdr;
342 struct rte_arp_hdr *arp_hdr;
343 struct ipv4_hdr *ipv4_hdr;
344 uint16_t ether_type, offset;
/* Bond IP assembled little-endian byte-by-byte -> network-order uint32. */
351 bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
352 (BOND_IP_3 << 16) | (BOND_IP_4 << 24);
/* NOTE(review): trylock result is ignored here (and at the loop bottom);
 * if the lock is contended the flag is read/written unprotected. */
354 rte_spinlock_trylock(&global_flag_stru_p->lock);
356 while (global_flag_stru_p->LcoreMainIsRunning) {
357 rte_spinlock_unlock(&global_flag_stru_p->lock);
358 rx_cnt = rte_eth_rx_burst(BOND_PORT, 0, pkts, MAX_PKT_BURST);
361 /* If didn't receive any packets, wait and go to next iteration */
367 /* Search incoming data for ARP packets and prepare response */
368 for (i = 0; i < rx_cnt; i++) {
/* Count every received packet (best-effort: skipped if lock contended). */
369 if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
370 global_flag_stru_p->port_packets[0]++;
371 rte_spinlock_unlock(&global_flag_stru_p->lock);
373 eth_hdr = rte_pktmbuf_mtod(pkts[i], struct ether_hdr *);
374 ether_type = eth_hdr->ether_type;
375 if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))
/* FIXME(review): "taged" typo in user-visible message (code change needed). */
376 printf("VLAN taged frame, offset:");
/* Skip VLAN tag(s); ether_type becomes the inner EtherType. */
377 offset = get_vlan_offset(eth_hdr, ðer_type);
379 printf("%d\n", offset);
380 if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
381 if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
382 global_flag_stru_p->port_packets[1]++;
383 rte_spinlock_unlock(&global_flag_stru_p->lock);
385 arp_hdr = (struct rte_arp_hdr *)(
386 (char *)(eth_hdr + 1) + offset);
/* ARP request for our IP: convert the packet in place into a reply. */
387 if (arp_hdr->arp_data.arp_tip == bond_ip) {
388 if (arp_hdr->arp_opcode == rte_cpu_to_be_16(RTE_ARP_OP_REQUEST)) {
389 arp_hdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REPLY);
390 /* Switch src and dst data and set bonding MAC */
391 ether_addr_copy(ð_hdr->s_addr, ð_hdr->d_addr);
392 rte_eth_macaddr_get(BOND_PORT, ð_hdr->s_addr);
393 ether_addr_copy(&arp_hdr->arp_data.arp_sha, &arp_hdr->arp_data.arp_tha);
394 arp_hdr->arp_data.arp_tip = arp_hdr->arp_data.arp_sip;
395 rte_eth_macaddr_get(BOND_PORT, &d_addr);
396 ether_addr_copy(&d_addr, &arp_hdr->arp_data.arp_sha);
397 arp_hdr->arp_data.arp_sip = bond_ip;
398 rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1);
/* Zero-packet burst: flushes queued TX (keeps ALB bookkeeping moving). */
401 rte_eth_tx_burst(BOND_PORT, 0, NULL, 0);
404 } else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
405 if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
406 global_flag_stru_p->port_packets[2]++;
407 rte_spinlock_unlock(&global_flag_stru_p->lock);
409 ipv4_hdr = (struct ipv4_hdr *)((char *)(eth_hdr + 1) + offset);
/* IPv4 to our address: swap src/dst (MAC and IP) and bounce it back. */
410 if (ipv4_hdr->dst_addr == bond_ip) {
411 ether_addr_copy(ð_hdr->s_addr, ð_hdr->d_addr);
412 rte_eth_macaddr_get(BOND_PORT, ð_hdr->s_addr);
413 ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
414 ipv4_hdr->src_addr = bond_ip;
415 rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1);
420 /* Free processed packets */
422 rte_pktmbuf_free(pkts[i]);
/* Re-take the lock before re-testing the while condition. */
424 rte_spinlock_trylock(&global_flag_stru_p->lock);
426 rte_spinlock_unlock(&global_flag_stru_p->lock);
427 printf("BYE lcore_main\n");
/* Parse result for the "send <ip>" command. NOTE(review): the `ip` member
 * (cmdline_ipaddr_t, referenced below) was dropped by the extraction. */
431 struct cmd_obj_send_result {
432 cmdline_fixed_string_t action;
/* Format the parsed IPv4 address into buf ("a.b.c.d") by reading its four
 * raw bytes in memory order; size bounds the write via snprintf. */
435 static inline void get_string(struct cmd_obj_send_result *res, char *buf, uint8_t size)
437 snprintf(buf, size, NIPQUAD_FMT,
438 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[0]),
439 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[1]),
440 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[2]),
441 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[3])
/* "send <ip>" handler: builds a broadcast ARP request from the bond port/IP
 * to the given IPv4 target and transmits it on the bond device.
 * NOTE(review): listing dropped lines (bond_ip/pkt_size declarations, braces,
 * early return after the IPv4-only check). */
444 static void cmd_obj_send_parsed(void *parsed_result,
445 __attribute__((unused)) struct cmdline *cl,
446 __attribute__((unused)) void *data)
449 struct cmd_obj_send_result *res = parsed_result;
450 char ip_str[INET6_ADDRSTRLEN];
452 struct rte_mbuf *created_pkt;
453 struct ether_hdr *eth_hdr;
454 struct rte_arp_hdr *arp_hdr;
/* Only IPv4 targets are supported. */
459 if (res->ip.family == AF_INET)
460 get_string(res, ip_str, INET_ADDRSTRLEN);
462 cmdline_printf(cl, "Wrong IP format. Only IPv4 is supported\n");
464 bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
465 (BOND_IP_3 << 16) | (BOND_IP_4 << 24);
467 created_pkt = rte_pktmbuf_alloc(mbuf_pool);
468 if (created_pkt == NULL) {
469 cmdline_printf(cl, "Failed to allocate mbuf\n");
/* Frame = Ethernet header + ARP header, no payload. */
473 pkt_size = sizeof(struct ether_hdr) + sizeof(struct rte_arp_hdr);
474 created_pkt->data_len = pkt_size;
475 created_pkt->pkt_len = pkt_size;
/* Ethernet: src = bond MAC, dst = broadcast, type = ARP.
 * NOTE(review): `ð_hdr` below is HTML-entity-mangled `&eth_hdr`. */
477 eth_hdr = rte_pktmbuf_mtod(created_pkt, struct ether_hdr *);
478 rte_eth_macaddr_get(BOND_PORT, ð_hdr->s_addr);
479 memset(ð_hdr->d_addr, 0xFF, ETHER_ADDR_LEN);
480 eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);
/* ARP request: who-has <target ip> tell <bond ip>. */
482 arp_hdr = (struct rte_arp_hdr *)(
483 (char *)eth_hdr + sizeof(struct ether_hdr));
484 arp_hdr->arp_hardware = rte_cpu_to_be_16(RTE_ARP_HRD_ETHER);
485 arp_hdr->arp_protocol = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
486 arp_hdr->arp_hlen = ETHER_ADDR_LEN;
487 arp_hdr->arp_plen = sizeof(uint32_t);
488 arp_hdr->arp_opcode = rte_cpu_to_be_16(RTE_ARP_OP_REQUEST);
490 rte_eth_macaddr_get(BOND_PORT, &arp_hdr->arp_data.arp_sha);
491 arp_hdr->arp_data.arp_sip = bond_ip;
492 memset(&arp_hdr->arp_data.arp_tha, 0, ETHER_ADDR_LEN);
/* Reassemble the target IP from its raw bytes (keeps network byte order). */
493 arp_hdr->arp_data.arp_tip =
494 ((unsigned char *)&res->ip.addr.ipv4)[0] |
495 (((unsigned char *)&res->ip.addr.ipv4)[1] << 8) |
496 (((unsigned char *)&res->ip.addr.ipv4)[2] << 16) |
497 (((unsigned char *)&res->ip.addr.ipv4)[3] << 24);
498 rte_eth_tx_burst(BOND_PORT, 0, &created_pkt, 1);
501 cmdline_printf(cl, "\n");
/* cmdline token/instruction wiring for "send <client_ip>". */
504 cmdline_parse_token_string_t cmd_obj_action_send =
505 TOKEN_STRING_INITIALIZER(struct cmd_obj_send_result, action, "send");
506 cmdline_parse_token_ipaddr_t cmd_obj_ip =
507 TOKEN_IPV4_INITIALIZER(struct cmd_obj_send_result, ip);
509 cmdline_parse_inst_t cmd_obj_send = {
510 .f = cmd_obj_send_parsed, /* function to call */
511 .data = NULL, /* 2nd arg of func */
512 .help_str = "send client_ip",
513 .tokens = { /* token list, NULL terminated */
514 (void *)&cmd_obj_action_send,
520 struct cmd_start_result {
521 cmdline_fixed_string_t start;
/* "start" handler: if lcore_main is not already running, pick the next slave
 * lcore and launch lcore_main on it.
 * NOTE(review): listing dropped lines (the `cl` parameter, braces, returns);
 * comments describe the visible code only. */
524 static void cmd_start_parsed(__attribute__((unused)) void *parsed_result,
526 __attribute__((unused)) void *data)
528 int slave_core_id = rte_lcore_id();
/* NOTE(review): trylock result ignored -- flag may be read unprotected. */
530 rte_spinlock_trylock(&global_flag_stru_p->lock);
531 if (global_flag_stru_p->LcoreMainIsRunning == 0) {
/* Previously-used core must be back in WAIT state before relaunching. */
532 if (lcore_config[global_flag_stru_p->LcoreMainCore].state != WAIT) {
533 rte_spinlock_unlock(&global_flag_stru_p->lock);
536 rte_spinlock_unlock(&global_flag_stru_p->lock);
538 cmdline_printf(cl, "lcore_main already running on core:%d\n",
539 global_flag_stru_p->LcoreMainCore);
540 rte_spinlock_unlock(&global_flag_stru_p->lock);
544 /* start lcore main on core != master_core - ARP response thread */
545 slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
546 if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
549 rte_spinlock_trylock(&global_flag_stru_p->lock);
550 global_flag_stru_p->LcoreMainIsRunning = 1;
551 rte_spinlock_unlock(&global_flag_stru_p->lock);
553 "Starting lcore_main on core %d:%d "
554 "Our IP:%d.%d.%d.%d\n",
556 rte_eal_remote_launch(lcore_main, NULL, slave_core_id),
/* cmdline token/instruction wiring for "start". */
564 cmdline_parse_token_string_t cmd_start_start =
565 TOKEN_STRING_INITIALIZER(struct cmd_start_result, start, "start");
567 cmdline_parse_inst_t cmd_start = {
568 .f = cmd_start_parsed, /* function to call */
569 .data = NULL, /* 2nd arg of func */
570 .help_str = "starts listening if not started at startup",
571 .tokens = { /* token list, NULL terminated */
572 (void *)&cmd_start_start,
577 struct cmd_help_result {
578 cmdline_fixed_string_t help;
/* "help" handler: prints the command summary to the cmdline session.
 * NOTE(review): the `cl` parameter and the cmdline_printf call wrapping the
 * string literal were dropped by the extraction. */
581 static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
583 __attribute__((unused)) void *data)
586 "ALB - link bonding mode 6 example\n"
587 "send IP - sends one ARPrequest through bonding for IP.\n"
588 "start - starts listening ARPs.\n"
589 "stop - stops lcore_main.\n"
590 "show - shows some bond info: ex. active slaves etc.\n"
591 "help - prints help.\n"
592 "quit - terminate all threads and quit.\n"
/* cmdline token/instruction wiring for "help". */
596 cmdline_parse_token_string_t cmd_help_help =
597 TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help");
599 cmdline_parse_inst_t cmd_help = {
600 .f = cmd_help_parsed, /* function to call */
601 .data = NULL, /* 2nd arg of func */
602 .help_str = "show help",
603 .tokens = { /* token list, NULL terminated */
604 (void *)&cmd_help_help,
609 struct cmd_stop_result {
610 cmdline_fixed_string_t stop;
/* "stop" handler: clears the run flag and waits for lcore_main's lcore to
 * finish via rte_eal_wait_lcore. NOTE(review): listing dropped lines (the
 * `cl` parameter, braces, cmdline_printf call sites). */
613 static void cmd_stop_parsed(__attribute__((unused)) void *parsed_result,
615 __attribute__((unused)) void *data)
617 rte_spinlock_trylock(&global_flag_stru_p->lock)
618 if (global_flag_stru_p->LcoreMainIsRunning == 0) {
620 "lcore_main not running on core:%d\n",
621 global_flag_stru_p->LcoreMainCore);
622 rte_spinlock_unlock(&global_flag_stru_p->lock);
/* Request shutdown, then join the worker lcore. */
625 global_flag_stru_p->LcoreMainIsRunning = 0;
626 if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0)
628 "error: lcore_main can not stop on core:%d\n",
629 global_flag_stru_p->LcoreMainCore);
632 "lcore_main stopped on core:%d\n",
633 global_flag_stru_p->LcoreMainCore);
634 rte_spinlock_unlock(&global_flag_stru_p->lock);
/* cmdline token/instruction wiring for "stop". */
637 cmdline_parse_token_string_t cmd_stop_stop =
638 TOKEN_STRING_INITIALIZER(struct cmd_stop_result, stop, "stop");
640 cmdline_parse_inst_t cmd_stop = {
641 .f = cmd_stop_parsed, /* function to call */
642 .data = NULL, /* 2nd arg of func */
643 .help_str = "this command do not handle any arguments",
644 .tokens = { /* token list, NULL terminated */
645 (void *)&cmd_stop_stop,
650 struct cmd_quit_result {
651 cmdline_fixed_string_t quit;
/* "quit" handler: same shutdown sequence as "stop" (stop lcore_main, join
 * its lcore), then -- per the help text -- terminates the application; the
 * cmdline_quit call is not visible in this truncated listing.
 * NOTE(review): near-duplicate of cmd_stop_parsed; could share a helper. */
654 static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
656 __attribute__((unused)) void *data)
658 rte_spinlock_trylock(&global_flag_stru_p->lock);
659 if (global_flag_stru_p->LcoreMainIsRunning == 0) {
661 "lcore_main not running on core:%d\n",
662 global_flag_stru_p->LcoreMainCore);
663 rte_spinlock_unlock(&global_flag_stru_p->lock);
667 global_flag_stru_p->LcoreMainIsRunning = 0;
668 if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0)
670 "error: lcore_main can not stop on core:%d\n",
671 global_flag_stru_p->LcoreMainCore);
674 "lcore_main stopped on core:%d\n",
675 global_flag_stru_p->LcoreMainCore);
676 rte_spinlock_unlock(&global_flag_stru_p->lock);
/* cmdline token/instruction wiring for "quit". */
680 cmdline_parse_token_string_t cmd_quit_quit =
681 TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
683 cmdline_parse_inst_t cmd_quit = {
684 .f = cmd_quit_parsed, /* function to call */
685 .data = NULL, /* 2nd arg of func */
686 .help_str = "this command do not handle any arguments",
687 .tokens = { /* token list, NULL terminated */
688 (void *)&cmd_quit_quit,
693 struct cmd_show_result {
694 cmdline_fixed_string_t show;
/* "show" handler: prints slave MAC addresses, the active-slave count, and the
 * packet counters accumulated by lcore_main.
 * NOTE(review): listing dropped lines (`cl` parameter, `i`/`len` declarations,
 * braces, the PRINT_MAC call inside the loop). */
697 static void cmd_show_parsed(__attribute__((unused)) void *parsed_result,
699 __attribute__((unused)) void *data)
701 uint16_t slaves[16] = {0};
703 struct ether_addr addr;
/* NOTE(review): queries port id `i` directly, assuming slave port ids are
 * 0..slaves_count-1 rather than reading the global slaves[] array -- verify. */
706 while (i < slaves_count) {
707 rte_eth_macaddr_get(i, &addr);
713 rte_spinlock_trylock(&global_flag_stru_p->lock);
716 "packets received:Tot:%d Arp:%d IPv4:%d\n",
717 rte_eth_bond_active_slaves_get(BOND_PORT, slaves, len),
718 global_flag_stru_p->port_packets[0],
719 global_flag_stru_p->port_packets[1],
720 global_flag_stru_p->port_packets[2]);
721 rte_spinlock_unlock(&global_flag_stru_p->lock);
/* cmdline token/instruction wiring for "show". */
724 cmdline_parse_token_string_t cmd_show_show =
725 TOKEN_STRING_INITIALIZER(struct cmd_show_result, show, "show");
727 cmdline_parse_inst_t cmd_show = {
728 .f = cmd_show_parsed, /* function to call */
729 .data = NULL, /* 2nd arg of func */
730 .help_str = "this command do not handle any arguments",
731 .tokens = { /* token list, NULL terminated */
732 (void *)&cmd_show_show,
737 /****** CONTEXT (list of instruction) */
/* All commands known to the interactive prompt; consumed by prompt() via
 * cmdline_stdin_new. NOTE(review): NULL terminator not visible in listing. */
739 cmdline_parse_ctx_t main_ctx[] = {
740 (cmdline_parse_inst_t *)&cmd_start,
741 (cmdline_parse_inst_t *)&cmd_obj_send,
742 (cmdline_parse_inst_t *)&cmd_stop,
743 (cmdline_parse_inst_t *)&cmd_show,
744 (cmdline_parse_inst_t *)&cmd_quit,
745 (cmdline_parse_inst_t *)&cmd_help,
749 /* prompt function, called from main on MASTER lcore */
/* Run the interactive "bond6>" shell until EOF/quit, then tear it down.
 * NOTE(review): the `cl` declaration and NULL check around these calls were
 * dropped by the extraction. */
750 static void prompt(__attribute__((unused)) void *arg1)
754 cl = cmdline_stdin_new(main_ctx, "bond6>");
756 cmdline_interact(cl);
757 cmdline_stdin_exit(cl);
761 /* Main function, does initialisation and calls the per-lcore functions */
/* Entry point: EAL init, mbuf pool creation, slave + bond port setup, launch
 * lcore_main on the first available slave lcore, then (per the comment at the
 * end) start the interactive prompt on the master lcore.
 * NOTE(review): listing dropped lines (return type, `ret` declaration, braces,
 * the nb_ports==0 check, printf arguments, prompt() call). */
763 main(int argc, char *argv[])
766 uint16_t nb_ports, i;
769 ret = rte_eal_init(argc, argv);
/* NOTE(review): devargs are dumped before the EAL-init error check below --
 * confirm this ordering is intentional (debug output even on failure?). */
770 rte_devargs_dump(stdout);
772 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
776 nb_ports = rte_eth_dev_count_avail();
778 rte_exit(EXIT_FAILURE, "Give at least one port\n");
779 else if (nb_ports > MAX_PORTS)
/* NOTE(review): message hard-codes "4"; should track MAX_PORTS. */
780 rte_exit(EXIT_FAILURE, "You can have max 4 ports\n");
/* Pool: NB_MBUF mbufs, per-lcore cache of 32, default buffer size. */
782 mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NB_MBUF, 32,
783 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
784 if (mbuf_pool == NULL)
785 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
787 /* initialize all ports */
788 slaves_count = nb_ports;
789 RTE_ETH_FOREACH_DEV(i) {
790 slave_port_init(i, mbuf_pool);
794 bond_port_init(mbuf_pool);
796 rte_spinlock_init(&global_flag_stru_p->lock);
797 int slave_core_id = rte_lcore_id();
799 /* check state of lcores */
800 RTE_LCORE_FOREACH_SLAVE(slave_core_id) {
801 if (lcore_config[slave_core_id].state != WAIT)
804 /* start lcore main on core != master_core - ARP response thread */
805 slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
806 if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
809 global_flag_stru_p->LcoreMainIsRunning = 1;
810 global_flag_stru_p->LcoreMainCore = slave_core_id;
811 printf("Starting lcore_main on core %d:%d Our IP:%d.%d.%d.%d\n",
813 rte_eal_remote_launch((lcore_function_t *)lcore_main,
822 /* Start prompt for user interact */