4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/queue.h>
36 #include <sys/socket.h>
50 #include <rte_common.h>
52 #include <rte_memory.h>
53 #include <rte_memcpy.h>
54 #include <rte_memzone.h>
56 #include <rte_per_lcore.h>
57 #include <rte_launch.h>
58 #include <rte_atomic.h>
59 #include <rte_cycles.h>
60 #include <rte_prefetch.h>
61 #include <rte_lcore.h>
62 #include <rte_per_lcore.h>
63 #include <rte_branch_prediction.h>
64 #include <rte_interrupts.h>
66 #include <rte_random.h>
67 #include <rte_debug.h>
68 #include <rte_ether.h>
69 #include <rte_ethdev.h>
71 #include <rte_mempool.h>
73 #include <rte_memcpy.h>
77 #include <rte_spinlock.h>
79 #include <cmdline_rdline.h>
80 #include <cmdline_parse.h>
81 #include <cmdline_parse_num.h>
82 #include <cmdline_parse_string.h>
83 #include <cmdline_parse_ipaddr.h>
84 #include <cmdline_parse_etheraddr.h>
85 #include <cmdline_socket.h>
90 #include <rte_devargs.h>
93 #include "rte_byteorder.h"
94 #include "rte_cpuflags.h"
95 #include "rte_eth_bond.h"
/* Extraction note: every line below begins with its original file line
 * number; gaps in that numbering mean lines were lost in this dump. */
97 #define RTE_LOGTYPE_DCB RTE_LOGTYPE_USER1
/* Size of the mbuf pool created in main(). */
99 #define NB_MBUF (1024*8)
/* Max packets fetched per rte_eth_rx_burst() call in lcore_main(). */
101 #define MAX_PKT_BURST 32
102 #define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* NOTE(review): the value is 10 but the trailing comment says ~100ns —
 * one of the two looks stale; confirm against the original example. */
103 #define BURST_RX_INTERVAL_NS (10) /* RX poll interval ~100ns */
106 * RX and TX Prefetch, Host, and Write-back threshold values should be
107 * carefully set for optimal performance. Consult the network
108 * controller's datasheet and supporting DPDK documentation for guidance
109 * on how these parameters should be set.
111 #define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
112 #define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
113 #define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
114 #define RX_FTHRESH (MAX_PKT_BURST * 2)/**< Default values of RX free threshold reg. */
117 * These default values are optimized for use with the Intel(R) 82599 10 GbE
118 * Controller and the DPDK ixgbe PMD. Consider using other values for other
119 * network controllers and/or network drivers.
121 #define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
122 #define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
123 #define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
126 * Configurable number of RX/TX ring descriptors
128 #define RTE_RX_DESC_DEFAULT 128
129 #define RTE_TX_DESC_DEFAULT 512
136 /* not defined under linux */
/* printf format for a dotted-quad IPv4 address (see get_string()). */
138 #define NIPQUAD_FMT "%u.%u.%u.%u"
/* Prints a MAC address as xx:xx:xx:xx:xx:xx — no trailing newline;
 * takes a struct ether_addr by value-like access (addr.addr_bytes[]). */
142 #define PRINT_MAC(addr) printf("%02"PRIx8":%02"PRIx8":%02"PRIx8 \
143 ":%02"PRIx8":%02"PRIx8":%02"PRIx8, \
144 addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], \
145 addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5])
/* Physical (slave) port ids enslaved to the bond, filled in main(). */
147 uint8_t slaves[RTE_MAX_ETHPORTS];
148 uint8_t slaves_count;
/* Port id of the bonded device; 0xff until bond_port_init() assigns it. */
150 static uint8_t BOND_PORT = 0xff;
/* Packet buffer pool shared by all ports; created in main(). */
152 static struct rte_mempool *mbuf_pool;
/* Default ethdev configuration used for both slave and bond ports.
 * NOTE(review): the struct nesting (.rxmode / .rx_adv_conf.rss_conf /
 * .txmode sub-aggregates) was lost in this dump — only the leaf
 * initializers survive below; restore the nesting when repairing. */
154 static struct rte_eth_conf port_conf = {
156 .mq_mode = ETH_MQ_RX_NONE,
157 .max_rx_pkt_len = ETHER_MAX_LEN,
159 .header_split = 0, /**< Header Split disabled */
/* NOTE(review): value 0 disables IP checksum offload, but the trailing
 * comment says "enabled" — comment and value disagree. */
160 .hw_ip_checksum = 0, /**< IP checksum offload enabled */
161 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
162 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
163 .hw_strip_crc = 1, /**< CRC stripped by hardware */
168 .rss_hf = ETH_RSS_IP,
172 .mq_mode = ETH_MQ_TX_NONE,
/*
 * Configure and start one physical (slave) port: one RX and one TX
 * queue, descriptor counts adjusted by the PMD, then print the port's
 * MAC address. Exits the whole application on any failure.
 * NOTE(review): the "if (retval != 0)" guards and closing braces were
 * dropped by this dump (the embedded line numbers jump) — each
 * rte_exit() below was originally conditional on the preceding call
 * failing, not unconditional.
 */
177 slave_port_init(uint8_t portid, struct rte_mempool *mbuf_pool)
180 uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
181 uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
183 if (portid >= rte_eth_dev_count())
184 rte_exit(EXIT_FAILURE, "Invalid port\n");
/* 1 RX queue, 1 TX queue, shared default port_conf. */
186 retval = rte_eth_dev_configure(portid, 1, 1, &port_conf);
188 rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
/* Let the driver clamp descriptor counts to what the HW supports. */
191 retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
193 rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
194 "failed (res=%d)\n", portid, retval);
/* Queues are placed on the port's own NUMA socket. */
197 retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
198 rte_eth_dev_socket_id(portid), NULL,
201 rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",
205 retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
206 rte_eth_dev_socket_id(portid), NULL);
209 rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
212 retval = rte_eth_dev_start(portid);
215 "Start port %d failed (res=%d)",
218 struct ether_addr addr;
220 rte_eth_macaddr_get(portid, &addr);
221 printf("Port %u MAC: ", (unsigned)portid);
/*
 * Create the bonded device ("bond0", mode 6 / adaptive load balancing),
 * configure it like a slave port, enslave every port in slaves[],
 * start it and enable promiscuous mode. Exits the app on any failure.
 * NOTE(review): as in slave_port_init(), the "if (retval ...)" guards
 * and closing braces were lost in this dump — the rte_exit() calls
 * below were originally conditional.
 */
227 bond_port_init(struct rte_mempool *mbuf_pool)
231 uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
232 uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
/* On success rte_eth_bond_create() returns the new port id. */
234 retval = rte_eth_bond_create("bond0", BONDING_MODE_ALB,
235 0 /*SOCKET_ID_ANY*/);
237 rte_exit(EXIT_FAILURE,
/* TODO(review): typo in user-facing message — "Faled" -> "Failed". */
238 "Faled to create bond port\n");
240 BOND_PORT = (uint8_t)retval;
242 retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &port_conf);
244 rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
247 retval = rte_eth_dev_adjust_nb_rx_tx_desc(BOND_PORT, &nb_rxd, &nb_txd);
249 rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
250 "failed (res=%d)\n", BOND_PORT, retval);
253 retval = rte_eth_rx_queue_setup(BOND_PORT, 0, nb_rxd,
254 rte_eth_dev_socket_id(BOND_PORT), NULL,
257 rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)",
261 retval = rte_eth_tx_queue_setup(BOND_PORT, 0, nb_txd,
262 rte_eth_dev_socket_id(BOND_PORT), NULL);
265 rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
/* Enslave every physical port initialised earlier by slave_port_init(). */
268 for (i = 0; i < slaves_count; i++) {
269 if (rte_eth_bond_slave_add(BOND_PORT, slaves[i]) == -1)
270 rte_exit(-1, "Oooops! adding slave (%u) to bond (%u) failed!\n",
271 slaves[i], BOND_PORT);
275 retval = rte_eth_dev_start(BOND_PORT);
277 rte_exit(retval, "Start port %d failed (res=%d)", BOND_PORT, retval);
279 rte_eth_promiscuous_enable(BOND_PORT);
281 struct ether_addr addr;
283 rte_eth_macaddr_get(BOND_PORT, &addr);
284 printf("Port %u MAC: ", (unsigned)BOND_PORT);
/*
 * Compute the byte offset past any VLAN tag(s) that follow the Ethernet
 * header, updating *proto to the inner EtherType (big-endian, as read
 * from the wire). Handles up to two stacked tags (QinQ): the nested
 * check below advances past a second vlan_hdr when present.
 * NOTE(review): the "return vlan_offset;" line and closing braces are
 * missing from this dump (line numbers jump past them).
 */
290 get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
292 size_t vlan_offset = 0;
294 if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
/* The VLAN header sits immediately after the Ethernet header. */
295 struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
297 vlan_offset = sizeof(struct vlan_hdr);
298 *proto = vlan_hdr->eth_proto;
/* Second (outer of QinQ) tag: step over one more vlan_hdr. */
300 if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
301 vlan_hdr = vlan_hdr + 1;
303 *proto = vlan_hdr->eth_proto;
304 vlan_offset += sizeof(struct vlan_hdr);
/* State shared between the cmdline thread and the lcore_main() worker.
 * NOTE(review): this dump lost members that the rest of the file
 * clearly uses — a spinlock ("lock", taken via rte_spinlock_* calls
 * below) and "LcoreMainCore" (read by the start/stop/quit handlers). */
310 struct global_flag_stru_t {
/* 1 while lcore_main() should keep polling; cleared by stop/quit. */
311 int LcoreMainIsRunning;
/* [0]=total RX, [1]=ARP, [2]=IPv4 counters (see lcore_main/show). */
313 uint32_t port_packets[4];
/* Single global instance, accessed everywhere through the pointer. */
316 struct global_flag_stru_t global_flag_stru;
317 struct global_flag_stru_t *global_flag_stru_p = &global_flag_stru;
/* NOTE(review): several lines in this function carry HTML-entity
 * damage — "ð" is a mangled "&eth", e.g. "ðer_type" was
 * "&ether_type" and "ð_hdr->s_addr" was "&eth_hdr->s_addr".
 * Restore the ampersand forms when repairing this dump. */
320 * Main thread that does the work, reading from INPUT_PORT
321 * and writing to OUTPUT_PORT
/*
 * Worker loop: polls the bond port, counts received packets, answers
 * ARP requests addressed to bond_ip, and echoes IPv4 packets addressed
 * to bond_ip back to their sender with addresses swapped. Runs until
 * LcoreMainIsRunning is cleared by the stop/quit handlers.
 */
323 static int lcore_main(__attribute__((unused)) void *arg1)
325 struct rte_mbuf *pkts[MAX_PKT_BURST] __rte_cache_aligned;
326 struct ether_addr d_addr;
328 struct ether_hdr *eth_hdr;
329 struct arp_hdr *arp_hdr;
330 struct ipv4_hdr *ipv4_hdr;
331 uint16_t ether_type, offset;
/* Assemble the bonding IP from its four byte constants (little-endian
 * byte order in memory, matching the on-wire dotted quad). */
338 bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
339 (BOND_IP_3 << 16) | (BOND_IP_4 << 24);
/* Lock is taken before each check of the run flag and dropped inside
 * the loop; counters below use trylock so stats never block the fast
 * path (a failed trylock just skips the increment). */
341 rte_spinlock_trylock(&global_flag_stru_p->lock);
343 while (global_flag_stru_p->LcoreMainIsRunning) {
344 rte_spinlock_unlock(&global_flag_stru_p->lock);
345 rx_cnt = rte_eth_rx_burst(BOND_PORT, 0, pkts, MAX_PKT_BURST);
348 /* If didn't receive any packets, wait and go to next iteration */
354 /* Search incoming data for ARP packets and prepare response */
355 for (i = 0; i < rx_cnt; i++) {
356 if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
357 global_flag_stru_p->port_packets[0]++;
358 rte_spinlock_unlock(&global_flag_stru_p->lock);
360 eth_hdr = rte_pktmbuf_mtod(pkts[i], struct ether_hdr *);
361 ether_type = eth_hdr->ether_type;
362 if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))
/* TODO(review): typo in output string — "taged" -> "tagged". */
363 printf("VLAN taged frame, offset:");
364 offset = get_vlan_offset(eth_hdr, ðer_type);
366 printf("%d\n", offset);
367 if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
368 if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
369 global_flag_stru_p->port_packets[1]++;
370 rte_spinlock_unlock(&global_flag_stru_p->lock);
/* ARP payload begins right after the (possibly VLAN-tagged) L2 header. */
372 arp_hdr = (struct arp_hdr *)((char *)(eth_hdr + 1) + offset);
373 if (arp_hdr->arp_data.arp_tip == bond_ip) {
374 if (arp_hdr->arp_op == rte_cpu_to_be_16(ARP_OP_REQUEST)) {
/* Turn the request into a reply in place and send it back. */
375 arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
376 /* Switch src and dst data and set bonding MAC */
377 ether_addr_copy(ð_hdr->s_addr, ð_hdr->d_addr);
378 rte_eth_macaddr_get(BOND_PORT, ð_hdr->s_addr);
379 ether_addr_copy(&arp_hdr->arp_data.arp_sha, &arp_hdr->arp_data.arp_tha);
380 arp_hdr->arp_data.arp_tip = arp_hdr->arp_data.arp_sip;
381 rte_eth_macaddr_get(BOND_PORT, &d_addr);
382 ether_addr_copy(&d_addr, &arp_hdr->arp_data.arp_sha);
383 arp_hdr->arp_data.arp_sip = bond_ip;
384 rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1);
/* Zero-length burst flushes any buffered TX packets. */
387 rte_eth_tx_burst(BOND_PORT, 0, NULL, 0);
390 } else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
391 if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
392 global_flag_stru_p->port_packets[2]++;
393 rte_spinlock_unlock(&global_flag_stru_p->lock);
395 ipv4_hdr = (struct ipv4_hdr *)((char *)(eth_hdr + 1) + offset);
396 if (ipv4_hdr->dst_addr == bond_ip) {
/* Echo back: swap MACs and IPs, then retransmit the same mbuf. */
397 ether_addr_copy(ð_hdr->s_addr, ð_hdr->d_addr);
398 rte_eth_macaddr_get(BOND_PORT, ð_hdr->s_addr);
399 ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
400 ipv4_hdr->src_addr = bond_ip;
401 rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1);
406 /* Free processed packets */
408 rte_pktmbuf_free(pkts[i]);
/* Re-acquire before the while-condition re-reads the run flag. */
410 rte_spinlock_trylock(&global_flag_stru_p->lock);
412 rte_spinlock_unlock(&global_flag_stru_p->lock);
413 printf("BYE lcore_main\n");
/* Parse result for the "send <ip>" cmdline command.
 * NOTE(review): the dump lost the struct's "ip" member (a
 * cmdline_ipaddr_t), which get_string() and cmd_obj_send_parsed()
 * both dereference as res->ip. */
417 struct cmd_obj_send_result {
418 cmdline_fixed_string_t action;
/* Format the parsed IPv4 address into buf as dotted quad; reads the
 * four raw bytes of res->ip.addr.ipv4 in memory order. */
421 static inline void get_string(struct cmd_obj_send_result *res, char *buf, uint8_t size)
423 snprintf(buf, size, NIPQUAD_FMT,
424 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[0]),
425 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[1]),
426 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[2]),
427 ((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[3])
/*
 * "send <ip>" handler: builds a broadcast ARP request from the bond
 * port's MAC / bond_ip asking for the given IPv4 address, and
 * transmits it on the bond port.
 */
430 static void cmd_obj_send_parsed(void *parsed_result,
431 __attribute__((unused)) struct cmdline *cl,
432 __attribute__((unused)) void *data)
435 struct cmd_obj_send_result *res = parsed_result;
436 char ip_str[INET6_ADDRSTRLEN];
438 struct rte_mbuf *created_pkt;
439 struct ether_hdr *eth_hdr;
440 struct arp_hdr *arp_hdr;
/* Only IPv4 targets are supported. */
445 if (res->ip.family == AF_INET)
446 get_string(res, ip_str, INET_ADDRSTRLEN);
448 cmdline_printf(cl, "Wrong IP format. Only IPv4 is supported\n");
450 bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
451 (BOND_IP_3 << 16) | (BOND_IP_4 << 24);
/* NOTE(review): created_pkt is used without a NULL check — an empty
 * mbuf pool would crash here; add a check when repairing. */
453 created_pkt = rte_pktmbuf_alloc(mbuf_pool);
454 pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr);
455 created_pkt->data_len = pkt_size;
456 created_pkt->pkt_len = pkt_size;
/* Ethernet: src = bond MAC, dst = broadcast, type = ARP.
 * NOTE(review): "ð" on the lines below is HTML-entity damage for
 * "&eth" (i.e. "&eth_hdr->..."). */
458 eth_hdr = rte_pktmbuf_mtod(created_pkt, struct ether_hdr *);
459 rte_eth_macaddr_get(BOND_PORT, ð_hdr->s_addr);
460 memset(ð_hdr->d_addr, 0xFF, ETHER_ADDR_LEN);
461 eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);
/* ARP request: who-has <target ip>, tell bond_ip / bond MAC. */
463 arp_hdr = (struct arp_hdr *)((char *)eth_hdr + sizeof(struct ether_hdr));
464 arp_hdr->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
465 arp_hdr->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
466 arp_hdr->arp_hln = ETHER_ADDR_LEN;
467 arp_hdr->arp_pln = sizeof(uint32_t);
468 arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REQUEST);
470 rte_eth_macaddr_get(BOND_PORT, &arp_hdr->arp_data.arp_sha);
471 arp_hdr->arp_data.arp_sip = bond_ip;
472 memset(&arp_hdr->arp_data.arp_tha, 0, ETHER_ADDR_LEN);
/* Reassemble the target IP from the parsed address bytes, in the same
 * in-memory byte order used for bond_ip above. */
473 arp_hdr->arp_data.arp_tip =
474 ((unsigned char *)&res->ip.addr.ipv4)[0] |
475 (((unsigned char *)&res->ip.addr.ipv4)[1] << 8) |
476 (((unsigned char *)&res->ip.addr.ipv4)[2] << 16) |
477 (((unsigned char *)&res->ip.addr.ipv4)[3] << 24);
478 rte_eth_tx_burst(BOND_PORT, 0, &created_pkt, 1);
481 cmdline_printf(cl, "\n");
/* cmdline token/instruction descriptors registering "send client_ip". */
484 cmdline_parse_token_string_t cmd_obj_action_send =
485 TOKEN_STRING_INITIALIZER(struct cmd_obj_send_result, action, "send");
486 cmdline_parse_token_ipaddr_t cmd_obj_ip =
487 TOKEN_IPV4_INITIALIZER(struct cmd_obj_send_result, ip);
489 cmdline_parse_inst_t cmd_obj_send = {
490 .f = cmd_obj_send_parsed, /* function to call */
491 .data = NULL, /* 2nd arg of func */
492 .help_str = "send client_ip",
493 .tokens = { /* token list, NULL terminated */
494 (void *)&cmd_obj_action_send,
/* Parse result for the "start" command (single fixed token). */
500 struct cmd_start_result {
501 cmdline_fixed_string_t start;
/*
 * "start" handler: if lcore_main() is not already running, pick the
 * next slave lcore after the master and launch lcore_main() on it.
 * NOTE(review): dump gaps hide several guards/branches here (e.g. the
 * bodies between the lock checks) — rte_exit/return lines were elided.
 */
504 static void cmd_start_parsed(__attribute__((unused)) void *parsed_result,
506 __attribute__((unused)) void *data)
508 int slave_core_id = rte_lcore_id();
510 rte_spinlock_trylock(&global_flag_stru_p->lock);
511 if (global_flag_stru_p->LcoreMainIsRunning == 0) {
/* Refuse to relaunch onto a core that is not in WAIT state. */
512 if (lcore_config[global_flag_stru_p->LcoreMainCore].state != WAIT) {
513 rte_spinlock_unlock(&global_flag_stru_p->lock);
516 rte_spinlock_unlock(&global_flag_stru_p->lock);
518 cmdline_printf(cl, "lcore_main already running on core:%d\n",
519 global_flag_stru_p->LcoreMainCore);
520 rte_spinlock_unlock(&global_flag_stru_p->lock);
524 /* start lcore main on core != master_core - ARP response thread */
525 slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
526 if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
529 rte_spinlock_trylock(&global_flag_stru_p->lock);
530 global_flag_stru_p->LcoreMainIsRunning = 1;
531 rte_spinlock_unlock(&global_flag_stru_p->lock);
533 "Starting lcore_main on core %d:%d "
534 "Our IP:%d.%d.%d.%d\n",
536 rte_eal_remote_launch(lcore_main, NULL, slave_core_id),
/* cmdline descriptors registering the "start" command. */
544 cmdline_parse_token_string_t cmd_start_start =
545 TOKEN_STRING_INITIALIZER(struct cmd_start_result, start, "start");
547 cmdline_parse_inst_t cmd_start = {
548 .f = cmd_start_parsed, /* function to call */
549 .data = NULL, /* 2nd arg of func */
550 .help_str = "starts listening if not started at startup",
551 .tokens = { /* token list, NULL terminated */
552 (void *)&cmd_start_start,
/* Parse result for the "help" command. */
557 struct cmd_help_result {
558 cmdline_fixed_string_t help;
/* "help" handler: prints the command summary (via cmdline_printf —
 * the call line itself was elided in this dump). */
561 static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
563 __attribute__((unused)) void *data)
566 "ALB - link bonding mode 6 example\n"
/* TODO(review): "ARPrequest" is missing a space -> "ARP request". */
567 "send IP - sends one ARPrequest through bonding for IP.\n"
568 "start - starts listening ARPs.\n"
569 "stop - stops lcore_main.\n"
570 "show - shows some bond info: ex. active slaves etc.\n"
571 "help - prints help.\n"
572 "quit - terminate all threads and quit.\n"
/* cmdline descriptors registering the "help" command. */
576 cmdline_parse_token_string_t cmd_help_help =
577 TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help");
579 cmdline_parse_inst_t cmd_help = {
580 .f = cmd_help_parsed, /* function to call */
581 .data = NULL, /* 2nd arg of func */
582 .help_str = "show help",
583 .tokens = { /* token list, NULL terminated */
584 (void *)&cmd_help_help,
/* Parse result for the "stop" command. */
589 struct cmd_stop_result {
590 cmdline_fixed_string_t stop;
/*
 * "stop" handler: clears LcoreMainIsRunning under the lock, then waits
 * for the worker lcore to finish via rte_eal_wait_lcore().
 */
593 static void cmd_stop_parsed(__attribute__((unused)) void *parsed_result,
595 __attribute__((unused)) void *data)
597 rte_spinlock_trylock(&global_flag_stru_p->lock);
598 if (global_flag_stru_p->LcoreMainIsRunning == 0) {
600 "lcore_main not running on core:%d\n",
601 global_flag_stru_p->LcoreMainCore);
602 rte_spinlock_unlock(&global_flag_stru_p->lock);
605 global_flag_stru_p->LcoreMainIsRunning = 0;
/* Block until the worker returns; negative means it could not stop. */
606 if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0)
608 "error: lcore_main can not stop on core:%d\n",
609 global_flag_stru_p->LcoreMainCore);
612 "lcore_main stopped on core:%d\n",
613 global_flag_stru_p->LcoreMainCore);
614 rte_spinlock_unlock(&global_flag_stru_p->lock);
/* cmdline descriptors registering the "stop" command. */
617 cmdline_parse_token_string_t cmd_stop_stop =
618 TOKEN_STRING_INITIALIZER(struct cmd_stop_result, stop, "stop");
620 cmdline_parse_inst_t cmd_stop = {
621 .f = cmd_stop_parsed, /* function to call */
622 .data = NULL, /* 2nd arg of func */
623 .help_str = "this command do not handle any arguments",
624 .tokens = { /* token list, NULL terminated */
625 (void *)&cmd_stop_stop,
/* Parse result for the "quit" command. */
630 struct cmd_quit_result {
631 cmdline_fixed_string_t quit;
/*
 * "quit" handler: same shutdown sequence as cmd_stop_parsed() (stop
 * lcore_main, wait for it), then presumably exits the cmdline loop —
 * the cmdline_quit() call was elided from this dump.
 * NOTE(review): this duplicates cmd_stop_parsed() almost line-for-line;
 * a shared helper would remove the duplication when repairing.
 */
634 static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
636 __attribute__((unused)) void *data)
638 rte_spinlock_trylock(&global_flag_stru_p->lock);
639 if (global_flag_stru_p->LcoreMainIsRunning == 0) {
641 "lcore_main not running on core:%d\n",
642 global_flag_stru_p->LcoreMainCore);
643 rte_spinlock_unlock(&global_flag_stru_p->lock);
647 global_flag_stru_p->LcoreMainIsRunning = 0;
648 if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0)
650 "error: lcore_main can not stop on core:%d\n",
651 global_flag_stru_p->LcoreMainCore);
654 "lcore_main stopped on core:%d\n",
655 global_flag_stru_p->LcoreMainCore);
656 rte_spinlock_unlock(&global_flag_stru_p->lock);
/* cmdline descriptors registering the "quit" command. */
660 cmdline_parse_token_string_t cmd_quit_quit =
661 TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");
663 cmdline_parse_inst_t cmd_quit = {
664 .f = cmd_quit_parsed, /* function to call */
665 .data = NULL, /* 2nd arg of func */
666 .help_str = "this command do not handle any arguments",
667 .tokens = { /* token list, NULL terminated */
668 (void *)&cmd_quit_quit,
/* Parse result for the "show" command. */
673 struct cmd_show_result {
674 cmdline_fixed_string_t show;
/*
 * "show" handler: prints each slave's MAC, the count of active slaves
 * on the bond, and the per-type packet counters under the lock.
 * NOTE(review): dump gaps hide the PRINT_MAC/printf lines inside the
 * while loop and the "len" initialisation.
 */
677 static void cmd_show_parsed(__attribute__((unused)) void *parsed_result,
679 __attribute__((unused)) void *data)
681 uint8_t slaves[16] = {0};
683 struct ether_addr addr;
/* Walk the physical ports and fetch each one's MAC address. */
686 while (i < slaves_count) {
687 rte_eth_macaddr_get(i, &addr);
/* Counters are read under the lock to pair with lcore_main()'s
 * trylock-protected increments. */
693 rte_spinlock_trylock(&global_flag_stru_p->lock);
696 "packets received:Tot:%d Arp:%d IPv4:%d\n",
697 rte_eth_bond_active_slaves_get(BOND_PORT, slaves, len),
698 global_flag_stru_p->port_packets[0],
699 global_flag_stru_p->port_packets[1],
700 global_flag_stru_p->port_packets[2]);
701 rte_spinlock_unlock(&global_flag_stru_p->lock);
/* cmdline descriptors registering the "show" command. */
704 cmdline_parse_token_string_t cmd_show_show =
705 TOKEN_STRING_INITIALIZER(struct cmd_show_result, show, "show");
707 cmdline_parse_inst_t cmd_show = {
708 .f = cmd_show_parsed, /* function to call */
709 .data = NULL, /* 2nd arg of func */
710 .help_str = "this command do not handle any arguments",
711 .tokens = { /* token list, NULL terminated */
712 (void *)&cmd_show_show,
717 /****** CONTEXT (list of instruction) */
/* NULL-terminated command table consumed by cmdline_stdin_new() in
 * prompt(); the terminating NULL entry was elided from this dump. */
719 cmdline_parse_ctx_t main_ctx[] = {
720 (cmdline_parse_inst_t *)&cmd_start,
721 (cmdline_parse_inst_t *)&cmd_obj_send,
722 (cmdline_parse_inst_t *)&cmd_stop,
723 (cmdline_parse_inst_t *)&cmd_show,
724 (cmdline_parse_inst_t *)&cmd_quit,
725 (cmdline_parse_inst_t *)&cmd_help,
729 /* prompt function, called from main on MASTER lcore */
/* Runs the interactive "bond6>" shell until the user quits; blocks in
 * cmdline_interact() and tears the cmdline down afterwards. */
730 static void prompt(__attribute__((unused)) void *arg1)
734 cl = cmdline_stdin_new(main_ctx, "bond6>");
736 cmdline_interact(cl);
737 cmdline_stdin_exit(cl);
741 /* Main function, does initialisation and calls the per-lcore functions */
/*
 * Initialise EAL, validate the port count, create the mbuf pool,
 * initialise every physical port as a slave, build the bond port, and
 * auto-launch lcore_main() on the next available slave core before
 * handing the master core to the interactive prompt.
 * NOTE(review): dump gaps hide the error checks after rte_eal_init(),
 * loop bodies that record slaves[], and the final prompt() call.
 */
743 main(int argc, char *argv[])
749 ret = rte_eal_init(argc, argv);
750 rte_eal_devargs_dump(stdout);
752 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
756 nb_ports = rte_eth_dev_count();
758 rte_exit(EXIT_FAILURE, "Give at least one port\n");
759 else if (nb_ports > MAX_PORTS)
760 rte_exit(EXIT_FAILURE, "You can have max 4 ports\n");
/* 32-entry per-core cache, default mbuf data room, local socket. */
762 mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NB_MBUF, 32,
763 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
764 if (mbuf_pool == NULL)
765 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
767 /* initialize all ports */
768 slaves_count = nb_ports;
769 for (i = 0; i < nb_ports; i++) {
770 slave_port_init(i, mbuf_pool);
774 bond_port_init(mbuf_pool);
776 rte_spinlock_init(&global_flag_stru_p->lock);
777 int slave_core_id = rte_lcore_id();
779 /* check state of lcores */
780 RTE_LCORE_FOREACH_SLAVE(slave_core_id) {
781 if (lcore_config[slave_core_id].state != WAIT)
784 /* start lcore main on core != master_core - ARP response thread */
785 slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
786 if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
789 global_flag_stru_p->LcoreMainIsRunning = 1;
790 global_flag_stru_p->LcoreMainCore = slave_core_id;
791 printf("Starting lcore_main on core %d:%d Our IP:%d.%d.%d.%d\n",
793 rte_eal_remote_launch((lcore_function_t *)lcore_main,
802 /* Start prompt for user interact */