 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_memcpy.h>
#include <rte_spinlock.h>

#include <cmdline_rdline.h>
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
#include <cmdline_parse_ipaddr.h>
#include <cmdline_parse_etheraddr.h>
#include <cmdline_socket.h>

#include <rte_devargs.h>

#include "rte_byteorder.h"
#include "rte_cpuflags.h"
#include "rte_eth_bond.h"
#define RTE_LOGTYPE_DCB RTE_LOGTYPE_USER1

#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF   (1024*8)

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define BURST_RX_INTERVAL_NS (10) /* RX poll interval ~10ns */
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
#define RX_FTHRESH (MAX_PKT_BURST * 2) /**< Default values of RX free threshold reg. */

/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0  /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0  /**< Default values of TX write-back threshold reg. */

/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_RX_DESC_DEFAULT 128
#define RTE_TX_DESC_DEFAULT 512

/* not defined under linux */
#define NIPQUAD_FMT "%u.%u.%u.%u"

#define PRINT_MAC(addr) printf("%02"PRIx8":%02"PRIx8":%02"PRIx8 \
	":%02"PRIx8":%02"PRIx8":%02"PRIx8, \
	addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], \
	addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5])
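
/* Slave port ids discovered at init time; BOND_PORT is set once the bonded device is created. */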
uint8_t slaves[RTE_MAX_ETHPORTS];
uint8_t slaves_count;

static uint8_t BOND_PORT = 0xff;

static struct rte_mempool *mbuf_pool;
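
/* Default device configuration applied to both the slave ports and the bonded port. */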
static struct rte_eth_conf port_conf = {
		.mq_mode = ETH_MQ_RX_NONE,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
			.rss_hf = ETH_RSS_IP,
		.mq_mode = ETH_MQ_TX_NONE,
slave_port_init(uint8_t portid, struct rte_mempool *mbuf_pool)
	if (portid >= rte_eth_dev_count())
		rte_exit(EXIT_FAILURE, "Invalid port\n");

	retval = rte_eth_dev_configure(portid, 1, 1, &port_conf);
		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",

	retval = rte_eth_rx_queue_setup(portid, 0, RTE_RX_DESC_DEFAULT,
					rte_eth_dev_socket_id(portid), NULL,
		rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",

	retval = rte_eth_tx_queue_setup(portid, 0, RTE_TX_DESC_DEFAULT,
					rte_eth_dev_socket_id(portid), NULL);
		rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",

	retval = rte_eth_dev_start(portid);
			"Start port %d failed (res=%d)",

	struct ether_addr addr;

	rte_eth_macaddr_get(portid, &addr);
	printf("Port %u MAC: ", (unsigned)portid);
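
/*
 * Create the bonded device in mode 6 (ALB), configure it, attach every
 * slave port and start it in promiscuous mode.
 */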
bond_port_init(struct rte_mempool *mbuf_pool)
	retval = rte_eth_bond_create("bond0", BONDING_MODE_ALB,
			0 /*SOCKET_ID_ANY*/);
		rte_exit(EXIT_FAILURE,
				"Failed to create bond port\n");

	BOND_PORT = (uint8_t)retval;

	retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &port_conf);
		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",

	retval = rte_eth_rx_queue_setup(BOND_PORT, 0, RTE_RX_DESC_DEFAULT,
					rte_eth_dev_socket_id(BOND_PORT), NULL,
		rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",

	retval = rte_eth_tx_queue_setup(BOND_PORT, 0, RTE_TX_DESC_DEFAULT,
					rte_eth_dev_socket_id(BOND_PORT), NULL);
		rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",

	for (i = 0; i < slaves_count; i++) {
		if (rte_eth_bond_slave_add(BOND_PORT, slaves[i]) == -1)
			rte_exit(-1, "Failed to add slave (%u) to bond (%u)\n",
					slaves[i], BOND_PORT);

	retval = rte_eth_dev_start(BOND_PORT);
		rte_exit(retval, "Start port %d failed (res=%d)", BOND_PORT, retval);

	rte_eth_promiscuous_enable(BOND_PORT);

	struct ether_addr addr;

	rte_eth_macaddr_get(BOND_PORT, &addr);
	printf("Port %u MAC: ", (unsigned)BOND_PORT);
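
/*
 * Return the total length of any VLAN headers (single or stacked) that
 * follow the Ethernet header, updating *proto to the encapsulated EtherType.
 */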
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;

			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct vlan_hdr);
/* Flags and counters shared between the CLI and lcore_main, guarded by a spinlock. */
struct global_flag_stru_t {
	int LcoreMainIsRunning;
	int LcoreMainCore;
	uint32_t port_packets[4];
	rte_spinlock_t lock;
};
struct global_flag_stru_t global_flag_stru;
struct global_flag_stru_t *global_flag_stru_p = &global_flag_stru;
 * Main worker loop: polls the bonded port, answers ARP requests addressed
 * to the bond IP and reflects IPv4 packets sent to it.
static int lcore_main(__attribute__((unused)) void *arg1)
	struct rte_mbuf *pkts[MAX_PKT_BURST] __rte_cache_aligned;
	struct ether_addr d_addr;

	struct ether_hdr *eth_hdr;
	struct arp_hdr *arp_hdr;
	struct ipv4_hdr *ipv4_hdr;
	uint16_t ether_type, offset;

	bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
			(BOND_IP_3 << 16) | (BOND_IP_4 << 24);

	rte_spinlock_trylock(&global_flag_stru_p->lock);

	while (global_flag_stru_p->LcoreMainIsRunning) {
		rte_spinlock_unlock(&global_flag_stru_p->lock);
		rx_cnt = rte_eth_rx_burst(BOND_PORT, 0, pkts, MAX_PKT_BURST);

		/* If no packets were received, wait and go to the next iteration */

		/* Search incoming data for ARP packets and prepare response */
		for (i = 0; i < rx_cnt; i++) {
			if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
				global_flag_stru_p->port_packets[0]++;
				rte_spinlock_unlock(&global_flag_stru_p->lock);
			eth_hdr = rte_pktmbuf_mtod(pkts[i], struct ether_hdr *);
			ether_type = eth_hdr->ether_type;
			if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))
				printf("VLAN tagged frame, offset:");
			offset = get_vlan_offset(eth_hdr, &ether_type);
				printf("%d\n", offset);
			if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
				if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
					global_flag_stru_p->port_packets[1]++;
					rte_spinlock_unlock(&global_flag_stru_p->lock);
				arp_hdr = (struct arp_hdr *)((char *)(eth_hdr + 1) + offset);
				if (arp_hdr->arp_data.arp_tip == bond_ip) {
					if (arp_hdr->arp_op == rte_cpu_to_be_16(ARP_OP_REQUEST)) {
						arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
						/* Switch src and dst data and set bonding MAC */
						ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
						rte_eth_macaddr_get(BOND_PORT, &eth_hdr->s_addr);
						ether_addr_copy(&arp_hdr->arp_data.arp_sha, &arp_hdr->arp_data.arp_tha);
						arp_hdr->arp_data.arp_tip = arp_hdr->arp_data.arp_sip;
						rte_eth_macaddr_get(BOND_PORT, &d_addr);
						ether_addr_copy(&d_addr, &arp_hdr->arp_data.arp_sha);
						arp_hdr->arp_data.arp_sip = bond_ip;
						rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1);
						rte_eth_tx_burst(BOND_PORT, 0, NULL, 0);
			} else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
				if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
					global_flag_stru_p->port_packets[2]++;
					rte_spinlock_unlock(&global_flag_stru_p->lock);
				ipv4_hdr = (struct ipv4_hdr *)((char *)(eth_hdr + 1) + offset);
				if (ipv4_hdr->dst_addr == bond_ip) {
					ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
					rte_eth_macaddr_get(BOND_PORT, &eth_hdr->s_addr);
					ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
					ipv4_hdr->src_addr = bond_ip;
					rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1);

			/* Free processed packets */
				rte_pktmbuf_free(pkts[i]);
		rte_spinlock_trylock(&global_flag_stru_p->lock);
	rte_spinlock_unlock(&global_flag_stru_p->lock);
	printf("BYE lcore_main\n");
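
/*
 * CLI command "send <ip>": builds a broadcast ARP request for the given
 * IPv4 address and transmits it on the bonded port.
 */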
struct cmd_obj_send_result {
	cmdline_fixed_string_t action;
	cmdline_ipaddr_t ip;
};

static inline void get_string(struct cmd_obj_send_result *res, char *buf, uint8_t size)
	snprintf(buf, size, NIPQUAD_FMT,
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[0]),
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[1]),
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[2]),
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[3])
static void cmd_obj_send_parsed(void *parsed_result,
		__attribute__((unused)) struct cmdline *cl,
		__attribute__((unused)) void *data)

	struct cmd_obj_send_result *res = parsed_result;
	char ip_str[INET6_ADDRSTRLEN];

	struct rte_mbuf *created_pkt;
	struct ether_hdr *eth_hdr;
	struct arp_hdr *arp_hdr;

	if (res->ip.family == AF_INET)
		get_string(res, ip_str, INET_ADDRSTRLEN);
	else
		cmdline_printf(cl, "Wrong IP format. Only IPv4 is supported\n");

	bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
			(BOND_IP_3 << 16) | (BOND_IP_4 << 24);

	created_pkt = rte_pktmbuf_alloc(mbuf_pool);
	pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr);
	created_pkt->data_len = pkt_size;
	created_pkt->pkt_len = pkt_size;

	eth_hdr = rte_pktmbuf_mtod(created_pkt, struct ether_hdr *);
	rte_eth_macaddr_get(BOND_PORT, &eth_hdr->s_addr);
	memset(&eth_hdr->d_addr, 0xFF, ETHER_ADDR_LEN);
	eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);

	arp_hdr = (struct arp_hdr *)((char *)eth_hdr + sizeof(struct ether_hdr));
	arp_hdr->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
	arp_hdr->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	arp_hdr->arp_hln = ETHER_ADDR_LEN;
	arp_hdr->arp_pln = sizeof(uint32_t);
	arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REQUEST);

	rte_eth_macaddr_get(BOND_PORT, &arp_hdr->arp_data.arp_sha);
	arp_hdr->arp_data.arp_sip = bond_ip;
	memset(&arp_hdr->arp_data.arp_tha, 0, ETHER_ADDR_LEN);
	arp_hdr->arp_data.arp_tip =
			((unsigned char *)&res->ip.addr.ipv4)[0]        |
			(((unsigned char *)&res->ip.addr.ipv4)[1] << 8)  |
			(((unsigned char *)&res->ip.addr.ipv4)[2] << 16) |
			(((unsigned char *)&res->ip.addr.ipv4)[3] << 24);
	rte_eth_tx_burst(BOND_PORT, 0, &created_pkt, 1);

	cmdline_printf(cl, "\n");
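
/* cmdline token and instruction definitions registering the "send" command. */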
cmdline_parse_token_string_t cmd_obj_action_send =
	TOKEN_STRING_INITIALIZER(struct cmd_obj_send_result, action, "send");
cmdline_parse_token_ipaddr_t cmd_obj_ip =
	TOKEN_IPV4_INITIALIZER(struct cmd_obj_send_result, ip);

cmdline_parse_inst_t cmd_obj_send = {
	.f = cmd_obj_send_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "send client_ip",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_obj_action_send,
struct cmd_start_result {
	cmdline_fixed_string_t start;

static void cmd_start_parsed(__attribute__((unused)) void *parsed_result,
			__attribute__((unused)) void *data)
	int slave_core_id = rte_lcore_id();

	rte_spinlock_trylock(&global_flag_stru_p->lock);
	if (global_flag_stru_p->LcoreMainIsRunning == 0) {
		if (lcore_config[global_flag_stru_p->LcoreMainCore].state != WAIT) {
			rte_spinlock_unlock(&global_flag_stru_p->lock);
		rte_spinlock_unlock(&global_flag_stru_p->lock);
		cmdline_printf(cl, "lcore_main already running on core:%d\n",
				global_flag_stru_p->LcoreMainCore);
		rte_spinlock_unlock(&global_flag_stru_p->lock);

	/* start lcore_main on a core other than the master core - ARP response thread */
	slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
	if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))

	rte_spinlock_trylock(&global_flag_stru_p->lock);
	global_flag_stru_p->LcoreMainIsRunning = 1;
	rte_spinlock_unlock(&global_flag_stru_p->lock);
			"Starting lcore_main on core %d:%d "
			"Our IP:%d.%d.%d.%d\n",
			rte_eal_remote_launch(lcore_main, NULL, slave_core_id),
cmdline_parse_token_string_t cmd_start_start =
	TOKEN_STRING_INITIALIZER(struct cmd_start_result, start, "start");

cmdline_parse_inst_t cmd_start = {
	.f = cmd_start_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "starts listening if not started at startup",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_start_start,
struct cmd_help_result {
	cmdline_fixed_string_t help;

static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
			__attribute__((unused)) void *data)
		"ALB - link bonding mode 6 example\n"
		"send IP - sends one ARP request through the bonded port for IP.\n"
		"start   - starts listening for ARPs.\n"
		"stop    - stops lcore_main.\n"
		"show    - shows bond info, e.g. active slaves.\n"
		"help    - prints help.\n"
		"quit    - terminates all threads and quits.\n"
cmdline_parse_token_string_t cmd_help_help =
	TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help");

cmdline_parse_inst_t cmd_help = {
	.f = cmd_help_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "show help",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_help_help,
struct cmd_stop_result {
	cmdline_fixed_string_t stop;

static void cmd_stop_parsed(__attribute__((unused)) void *parsed_result,
			__attribute__((unused)) void *data)
	rte_spinlock_trylock(&global_flag_stru_p->lock);
	if (global_flag_stru_p->LcoreMainIsRunning == 0) {
			"lcore_main not running on core:%d\n",
			global_flag_stru_p->LcoreMainCore);
		rte_spinlock_unlock(&global_flag_stru_p->lock);

	global_flag_stru_p->LcoreMainIsRunning = 0;
	rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore);
			"lcore_main stopped on core:%d\n",
			global_flag_stru_p->LcoreMainCore);
	rte_spinlock_unlock(&global_flag_stru_p->lock);
cmdline_parse_token_string_t cmd_stop_stop =
	TOKEN_STRING_INITIALIZER(struct cmd_stop_result, stop, "stop");

cmdline_parse_inst_t cmd_stop = {
	.f = cmd_stop_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "this command does not take any arguments",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_stop_stop,
struct cmd_quit_result {
	cmdline_fixed_string_t quit;

static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
			__attribute__((unused)) void *data)
	rte_spinlock_trylock(&global_flag_stru_p->lock);
	if (global_flag_stru_p->LcoreMainIsRunning == 0) {
			"lcore_main not running on core:%d\n",
			global_flag_stru_p->LcoreMainCore);
		rte_spinlock_unlock(&global_flag_stru_p->lock);

	global_flag_stru_p->LcoreMainIsRunning = 0;
	rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore);
			"lcore_main stopped on core:%d\n",
			global_flag_stru_p->LcoreMainCore);
	rte_spinlock_unlock(&global_flag_stru_p->lock);
cmdline_parse_token_string_t cmd_quit_quit =
	TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");

cmdline_parse_inst_t cmd_quit = {
	.f = cmd_quit_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "this command does not take any arguments",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_quit_quit,
struct cmd_show_result {
	cmdline_fixed_string_t show;

static void cmd_show_parsed(__attribute__((unused)) void *parsed_result,
			__attribute__((unused)) void *data)
	uint8_t slaves[16] = {0};

	struct ether_addr addr;

	while (i < slaves_count) {
		rte_eth_macaddr_get(i, &addr);

	rte_spinlock_trylock(&global_flag_stru_p->lock);
			"packets received: Tot:%d ARP:%d IPv4:%d\n",
			rte_eth_bond_active_slaves_get(BOND_PORT, slaves, len),
			global_flag_stru_p->port_packets[0],
			global_flag_stru_p->port_packets[1],
			global_flag_stru_p->port_packets[2]);
	rte_spinlock_unlock(&global_flag_stru_p->lock);
cmdline_parse_token_string_t cmd_show_show =
	TOKEN_STRING_INITIALIZER(struct cmd_show_result, show, "show");

cmdline_parse_inst_t cmd_show = {
	.f = cmd_show_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "this command does not take any arguments",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_show_show,
/****** CONTEXT (list of instructions) */

cmdline_parse_ctx_t main_ctx[] = {
	(cmdline_parse_inst_t *)&cmd_start,
	(cmdline_parse_inst_t *)&cmd_obj_send,
	(cmdline_parse_inst_t *)&cmd_stop,
	(cmdline_parse_inst_t *)&cmd_show,
	(cmdline_parse_inst_t *)&cmd_quit,
	(cmdline_parse_inst_t *)&cmd_help,
/* prompt function, called from main on MASTER lcore */
static void *prompt(__attribute__((unused)) void *arg1)
	cl = cmdline_stdin_new(main_ctx, "bond6>");
		cmdline_interact(cl);
		cmdline_stdin_exit(cl);
/* Main function, does initialisation and calls the per-lcore functions */
main(int argc, char *argv[])
	ret = rte_eal_init(argc, argv);
	rte_eal_devargs_dump(stdout);
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	nb_ports = rte_eth_dev_count();
		rte_exit(EXIT_FAILURE, "Specify at least one port\n");
	else if (nb_ports > MAX_PORTS)
		rte_exit(EXIT_FAILURE, "A maximum of 4 ports is supported\n");

	mbuf_pool = rte_mempool_create("MBUF_POOL", NB_MBUF,
				sizeof(struct rte_pktmbuf_pool_private),
				rte_pktmbuf_pool_init, NULL,
				rte_pktmbuf_init, NULL,
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");

	/* initialize all ports */
	slaves_count = nb_ports;
	for (i = 0; i < nb_ports; i++) {
		slave_port_init(i, mbuf_pool);

	bond_port_init(mbuf_pool);

	rte_spinlock_init(&global_flag_stru_p->lock);
	int slave_core_id = rte_lcore_id();

	/* check state of lcores */
	RTE_LCORE_FOREACH_SLAVE(slave_core_id) {
		if (lcore_config[slave_core_id].state != WAIT)

	/* start lcore_main on a core other than the master core - ARP response thread */
	slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
	if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))

	global_flag_stru_p->LcoreMainIsRunning = 1;
	global_flag_stru_p->LcoreMainCore = slave_core_id;
	printf("Starting lcore_main on core %d:%d Our IP:%d.%d.%d.%d\n",
	rte_eal_remote_launch((lcore_function_t *)lcore_main,

	/* Start prompt for user interaction */
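
/*
 * Typical session (a sketch; the binary name and the EAL core/memory options
 * below are assumptions - adjust them to your build and platform):
 *
 *     ./bond_app -c 0xf -n 4
 *
 * At the "bond6>" prompt:
 *     send <client_ip>   - transmit an ARP request for <client_ip>
 *     show               - print active slaves and packet counters
 *     start / stop       - control the lcore_main worker
 *     help / quit
 */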