/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>
#include <errno.h>
#include <netinet/in.h>
#include <sys/socket.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>

#include <cmdline_rdline.h>
#include <cmdline_parse.h>
#include <cmdline_parse_num.h>
#include <cmdline_parse_string.h>
#include <cmdline_parse_ipaddr.h>
#include <cmdline_parse_etheraddr.h>
#include <cmdline_socket.h>

#include <rte_devargs.h>

#include "rte_byteorder.h"
#include "rte_cpuflags.h"
#include "rte_eth_bond.h"

#define RTE_LOGTYPE_DCB RTE_LOGTYPE_USER1

#define NB_MBUF (1024*8)

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define BURST_RX_INTERVAL_NS (10) /* RX poll interval, in ns */
/*
 * RX and TX Prefetch, Host, and Write-back threshold values should be
 * carefully set for optimal performance. Consult the network
 * controller's datasheet and supporting DPDK documentation for guidance
 * on how these parameters should be set.
 */
#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
#define RX_FTHRESH (MAX_PKT_BURST * 2) /**< Default values of RX free threshold reg. */
/*
 * These default values are optimized for use with the Intel(R) 82599 10 GbE
 * Controller and the DPDK ixgbe PMD. Consider using other values for other
 * network controllers and/or network drivers.
 */
#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
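
/*
 * Note: this example leaves the queue thresholds on the PMD defaults taken
 * from rte_eth_dev_info_get() (see slave_port_init() below). If the values
 * above were applied explicitly, it would look roughly like this sketch
 * (not used by this application):
 *
 *	struct rte_eth_rxconf rxq_conf = dev_info.default_rxconf;
 *	rxq_conf.rx_thresh.pthresh = RX_PTHRESH;
 *	rxq_conf.rx_thresh.hthresh = RX_HTHRESH;
 *	rxq_conf.rx_thresh.wthresh = RX_WTHRESH;
 *	rxq_conf.rx_free_thresh = RX_FTHRESH;
 *
 *	struct rte_eth_txconf txq_conf = dev_info.default_txconf;
 *	txq_conf.tx_thresh.pthresh = TX_PTHRESH;
 *	txq_conf.tx_thresh.hthresh = TX_HTHRESH;
 *	txq_conf.tx_thresh.wthresh = TX_WTHRESH;
 */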
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_RX_DESC_DEFAULT 1024
#define RTE_TX_DESC_DEFAULT 1024

/* not defined under linux */
#define NIPQUAD_FMT "%u.%u.%u.%u"

#define PRINT_MAC(addr) printf("%02"PRIx8":%02"PRIx8":%02"PRIx8 \
	":%02"PRIx8":%02"PRIx8":%02"PRIx8, \
	addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], \
	addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5])

uint16_t slaves[RTE_MAX_ETHPORTS];
uint16_t slaves_count;
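
/* Port id of the bonded device; stays at an invalid id (0xffff) until
 * bond_port_init() creates it with rte_eth_bond_create(). */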
static uint16_t BOND_PORT = 0xffff;

static struct rte_mempool *mbuf_pool;

static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_NONE,
		.max_rx_pkt_len = ETHER_MAX_LEN,
	},
	.rx_adv_conf = {
		.rss_conf = { .rss_hf = ETH_RSS_IP, },
	},
	.txmode = { .mq_mode = ETH_MQ_TX_NONE, },
};

static void
slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
{
	int retval;
	uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
	uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_conf local_port_conf = port_conf;

	if (!rte_eth_dev_is_valid_port(portid))
		rte_exit(EXIT_FAILURE, "Invalid port\n");

	rte_eth_dev_info_get(portid, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;
	if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support, "
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			portid,
			port_conf.rx_adv_conf.rss_conf.rss_hf,
			local_port_conf.rx_adv_conf.rss_conf.rss_hf);
	}

	retval = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
				portid, retval);

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd, &nb_txd);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
				"failed (res=%d)\n", portid, retval);

	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = local_port_conf.rxmode.offloads;
	retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
					rte_eth_dev_socket_id(portid),
					&rxq_conf, mbuf_pool);
	if (retval < 0)
		rte_exit(retval, "port %u: RX queue 0 setup failed (res=%d)",
				portid, retval);

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = local_port_conf.txmode.offloads;
	retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
				rte_eth_dev_socket_id(portid), &txq_conf);
	if (retval < 0)
		rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
				portid, retval);

	retval = rte_eth_dev_start(portid);
	if (retval < 0)
		rte_exit(retval,
			"Start port %d failed (res=%d)",
			portid, retval);

	struct ether_addr addr;

	rte_eth_macaddr_get(portid, &addr);
	printf("Port %u MAC: ", portid);
	PRINT_MAC(addr);
	printf("\n");
}

static void
bond_port_init(struct rte_mempool *mbuf_pool)
{
	int retval;
	uint16_t i;
	uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
	uint16_t nb_txd = RTE_TX_DESC_DEFAULT;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_conf local_port_conf = port_conf;

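	/* Create the bonded device in mode 6 (BONDING_MODE_ALB, adaptive load
	 * balancing): the bonding PMD spreads TX traffic across the slaves and
	 * balances RX traffic by handing out different slave MAC addresses in
	 * ARP replies. */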
	retval = rte_eth_bond_create("net_bonding0", BONDING_MODE_ALB,
			0 /*SOCKET_ID_ANY*/);
	if (retval < 0)
		rte_exit(EXIT_FAILURE,
				"Failed to create bond port\n");

	BOND_PORT = retval;

	rte_eth_dev_info_get(BOND_PORT, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &local_port_conf);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n",
				BOND_PORT, retval);

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(BOND_PORT, &nb_rxd, &nb_txd);
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "port %u: rte_eth_dev_adjust_nb_rx_tx_desc "
				"failed (res=%d)\n", BOND_PORT, retval);

	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = local_port_conf.rxmode.offloads;
	retval = rte_eth_rx_queue_setup(BOND_PORT, 0, nb_rxd,
					rte_eth_dev_socket_id(BOND_PORT),
					&rxq_conf, mbuf_pool);
	if (retval < 0)
		rte_exit(retval, "port %u: RX queue 0 setup failed (res=%d)",
				BOND_PORT, retval);

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = local_port_conf.txmode.offloads;
	retval = rte_eth_tx_queue_setup(BOND_PORT, 0, nb_txd,
				rte_eth_dev_socket_id(BOND_PORT), &txq_conf);
	if (retval < 0)
		rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)",
				BOND_PORT, retval);

	for (i = 0; i < slaves_count; i++) {
		if (rte_eth_bond_slave_add(BOND_PORT, slaves[i]) == -1)
			rte_exit(-1, "Oooops! adding slave (%u) to bond (%u) failed!\n",
					slaves[i], BOND_PORT);
	}

	retval = rte_eth_dev_start(BOND_PORT);
	if (retval < 0)
		rte_exit(retval, "Start port %d failed (res=%d)", BOND_PORT, retval);

	rte_eth_promiscuous_enable(BOND_PORT);

	struct ether_addr addr;

	rte_eth_macaddr_get(BOND_PORT, &addr);
	printf("Port %u MAC: ", (unsigned)BOND_PORT);
	PRINT_MAC(addr);
	printf("\n");
}
static inline size_t
get_vlan_offset(struct ether_hdr *eth_hdr, uint16_t *proto)
{
	size_t vlan_offset = 0;

	if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		vlan_offset = sizeof(struct vlan_hdr);
		*proto = vlan_hdr->eth_proto;

		if (rte_cpu_to_be_16(ETHER_TYPE_VLAN) == *proto) {
			vlan_hdr = vlan_hdr + 1;

			*proto = vlan_hdr->eth_proto;
			vlan_offset += sizeof(struct vlan_hdr);
		}
	}
	return vlan_offset;
}

struct global_flag_stru_t {
	int LcoreMainIsRunning;
	int LcoreMainCore;
	uint32_t port_packets[4];
	rte_spinlock_t lock;
};
struct global_flag_stru_t global_flag_stru;
struct global_flag_stru_t *global_flag_stru_p = &global_flag_stru;

/*
 * Main thread that does the work: reads from the bond port, answers ARP
 * requests for the bond IP and echoes IPv4 packets addressed to it.
 */
static int lcore_main(__attribute__((unused)) void *arg1)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST] __rte_cache_aligned;
	struct ether_addr d_addr;

	struct ether_hdr *eth_hdr;
	struct arp_hdr *arp_hdr;
	struct ipv4_hdr *ipv4_hdr;
	uint16_t ether_type, offset;

	uint16_t rx_cnt;
	uint32_t bond_ip;
	uint8_t is_free;
	int i;

	bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
			(BOND_IP_3 << 16) | (BOND_IP_4 << 24);

	rte_spinlock_trylock(&global_flag_stru_p->lock);

	while (global_flag_stru_p->LcoreMainIsRunning) {
		rte_spinlock_unlock(&global_flag_stru_p->lock);
		rx_cnt = rte_eth_rx_burst(BOND_PORT, 0, pkts, MAX_PKT_BURST);

		/* If no packets were received, wait and go to the next iteration */

		/* Search incoming data for ARP packets and prepare response */
		for (i = 0; i < rx_cnt; i++) {
			is_free = 0;
			if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
				global_flag_stru_p->port_packets[0]++;
				rte_spinlock_unlock(&global_flag_stru_p->lock);
			}
			eth_hdr = rte_pktmbuf_mtod(pkts[i], struct ether_hdr *);
			ether_type = eth_hdr->ether_type;
			if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))
				printf("VLAN tagged frame, offset:");
			offset = get_vlan_offset(eth_hdr, &ether_type);
			if (offset > 0)
				printf("%d\n", offset);
			if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
				if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
					global_flag_stru_p->port_packets[1]++;
					rte_spinlock_unlock(&global_flag_stru_p->lock);
				}
				arp_hdr = (struct arp_hdr *)((char *)(eth_hdr + 1) + offset);
				if (arp_hdr->arp_data.arp_tip == bond_ip) {
					if (arp_hdr->arp_op == rte_cpu_to_be_16(ARP_OP_REQUEST)) {
						arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REPLY);
						/* Switch src and dst data and set bonding MAC */
						ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
						rte_eth_macaddr_get(BOND_PORT, &eth_hdr->s_addr);
						ether_addr_copy(&arp_hdr->arp_data.arp_sha,
								&arp_hdr->arp_data.arp_tha);
						arp_hdr->arp_data.arp_tip = arp_hdr->arp_data.arp_sip;
						rte_eth_macaddr_get(BOND_PORT, &d_addr);
						ether_addr_copy(&d_addr, &arp_hdr->arp_data.arp_sha);
						arp_hdr->arp_data.arp_sip = bond_ip;
						rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1);
						is_free = 1;
					} else {
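						/* No reply to send, but still give the bonding PMD a
						 * transmit opportunity: mode 6 generates ARP packets
						 * of its own for client rebalancing, and an empty
						 * burst lets it flush them. */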
						rte_eth_tx_burst(BOND_PORT, 0, NULL, 0);
					}
				}
			} else if (ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
				if (rte_spinlock_trylock(&global_flag_stru_p->lock) == 1) {
					global_flag_stru_p->port_packets[2]++;
					rte_spinlock_unlock(&global_flag_stru_p->lock);
				}
				ipv4_hdr = (struct ipv4_hdr *)((char *)(eth_hdr + 1) + offset);
				if (ipv4_hdr->dst_addr == bond_ip) {
					ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
					rte_eth_macaddr_get(BOND_PORT, &eth_hdr->s_addr);
					ipv4_hdr->dst_addr = ipv4_hdr->src_addr;
					ipv4_hdr->src_addr = bond_ip;
					rte_eth_tx_burst(BOND_PORT, 0, &pkts[i], 1);
					is_free = 1;
				}
			}

			/* Free processed packets that were not retransmitted */
			if (is_free == 0)
				rte_pktmbuf_free(pkts[i]);
		}
		rte_spinlock_trylock(&global_flag_stru_p->lock);
	}
	rte_spinlock_unlock(&global_flag_stru_p->lock);
	printf("BYE lcore_main\n");
	return 0;
}

struct cmd_obj_send_result {
	cmdline_fixed_string_t action;
	cmdline_ipaddr_t ip;
};

static inline void get_string(struct cmd_obj_send_result *res, char *buf, uint8_t size)
{
	snprintf(buf, size, NIPQUAD_FMT,
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[0]),
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[1]),
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[2]),
		((unsigned)((unsigned char *)&(res->ip.addr.ipv4))[3]));
}

static void cmd_obj_send_parsed(void *parsed_result,
		__attribute__((unused)) struct cmdline *cl,
		__attribute__((unused)) void *data)
{
	struct cmd_obj_send_result *res = parsed_result;
	char ip_str[INET6_ADDRSTRLEN];

	struct rte_mbuf *created_pkt;
	struct ether_hdr *eth_hdr;
	struct arp_hdr *arp_hdr;

	uint32_t bond_ip;
	size_t pkt_size;

	if (res->ip.family == AF_INET)
		get_string(res, ip_str, INET_ADDRSTRLEN);
	else {
		cmdline_printf(cl, "Wrong IP format. Only IPv4 is supported\n");
		return;
	}

	bond_ip = BOND_IP_1 | (BOND_IP_2 << 8) |
			(BOND_IP_3 << 16) | (BOND_IP_4 << 24);

	created_pkt = rte_pktmbuf_alloc(mbuf_pool);
	if (created_pkt == NULL) {
		cmdline_printf(cl, "Failed to allocate mbuf\n");
		return;
	}

	pkt_size = sizeof(struct ether_hdr) + sizeof(struct arp_hdr);
	created_pkt->data_len = pkt_size;
	created_pkt->pkt_len = pkt_size;

	eth_hdr = rte_pktmbuf_mtod(created_pkt, struct ether_hdr *);
	rte_eth_macaddr_get(BOND_PORT, &eth_hdr->s_addr);
	memset(&eth_hdr->d_addr, 0xFF, ETHER_ADDR_LEN);
	eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_ARP);

	arp_hdr = (struct arp_hdr *)((char *)eth_hdr + sizeof(struct ether_hdr));
	arp_hdr->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
	arp_hdr->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	arp_hdr->arp_hln = ETHER_ADDR_LEN;
	arp_hdr->arp_pln = sizeof(uint32_t);
	arp_hdr->arp_op = rte_cpu_to_be_16(ARP_OP_REQUEST);

	rte_eth_macaddr_get(BOND_PORT, &arp_hdr->arp_data.arp_sha);
	arp_hdr->arp_data.arp_sip = bond_ip;
	memset(&arp_hdr->arp_data.arp_tha, 0, ETHER_ADDR_LEN);
	arp_hdr->arp_data.arp_tip =
		((unsigned char *)&res->ip.addr.ipv4)[0] |
		(((unsigned char *)&res->ip.addr.ipv4)[1] << 8) |
		(((unsigned char *)&res->ip.addr.ipv4)[2] << 16) |
		(((unsigned char *)&res->ip.addr.ipv4)[3] << 24);
	rte_eth_tx_burst(BOND_PORT, 0, &created_pkt, 1);

	cmdline_printf(cl, "\n");
}

cmdline_parse_token_string_t cmd_obj_action_send =
	TOKEN_STRING_INITIALIZER(struct cmd_obj_send_result, action, "send");
cmdline_parse_token_ipaddr_t cmd_obj_ip =
	TOKEN_IPV4_INITIALIZER(struct cmd_obj_send_result, ip);

cmdline_parse_inst_t cmd_obj_send = {
	.f = cmd_obj_send_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "send client_ip",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_obj_action_send,
		(void *)&cmd_obj_ip,
		NULL,
	},
};

struct cmd_start_result {
	cmdline_fixed_string_t start;
};

static void cmd_start_parsed(__attribute__((unused)) void *parsed_result,
		struct cmdline *cl,
		__attribute__((unused)) void *data)
{
	int slave_core_id = rte_lcore_id();

	rte_spinlock_trylock(&global_flag_stru_p->lock);
	if (global_flag_stru_p->LcoreMainIsRunning == 0) {
		if (lcore_config[global_flag_stru_p->LcoreMainCore].state != WAIT) {
			rte_spinlock_unlock(&global_flag_stru_p->lock);
			return;
		}
		rte_spinlock_unlock(&global_flag_stru_p->lock);
	} else {
		cmdline_printf(cl, "lcore_main already running on core:%d\n",
				global_flag_stru_p->LcoreMainCore);
		rte_spinlock_unlock(&global_flag_stru_p->lock);
		return;
	}

	/* start lcore main on core != master_core - ARP response thread */
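	/* rte_get_next_lcore(prev, skip_master = 1, wrap = 0): returns the first
	 * enabled slave lcore after the master, or RTE_MAX_LCORE if there is
	 * none. */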
	slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
	if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
		return;

	rte_spinlock_trylock(&global_flag_stru_p->lock);
	global_flag_stru_p->LcoreMainIsRunning = 1;
	rte_spinlock_unlock(&global_flag_stru_p->lock);
	cmdline_printf(cl,
			"Starting lcore_main on core %d:%d "
			"Our IP:%d.%d.%d.%d\n",
			slave_core_id,
			rte_eal_remote_launch(lcore_main, NULL, slave_core_id),
			BOND_IP_1, BOND_IP_2, BOND_IP_3, BOND_IP_4);
}

cmdline_parse_token_string_t cmd_start_start =
	TOKEN_STRING_INITIALIZER(struct cmd_start_result, start, "start");

cmdline_parse_inst_t cmd_start = {
	.f = cmd_start_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "starts listening if not started at startup",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_start_start,
		NULL,
	},
};

struct cmd_help_result {
	cmdline_fixed_string_t help;
};

static void cmd_help_parsed(__attribute__((unused)) void *parsed_result,
		struct cmdline *cl,
		__attribute__((unused)) void *data)
{
	cmdline_printf(cl,
			"ALB - link bonding mode 6 example\n"
			"send IP - sends one ARP request for IP through the bonded port.\n"
			"start   - starts listening for ARP requests.\n"
			"stop    - stops lcore_main.\n"
			"show    - shows some bond info, e.g. active slaves.\n"
			"help    - prints this help.\n"
			"quit    - terminates all threads and quits.\n");
}

cmdline_parse_token_string_t cmd_help_help =
	TOKEN_STRING_INITIALIZER(struct cmd_help_result, help, "help");

cmdline_parse_inst_t cmd_help = {
	.f = cmd_help_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "show help",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_help_help,
		NULL,
	},
};

struct cmd_stop_result {
	cmdline_fixed_string_t stop;
};

static void cmd_stop_parsed(__attribute__((unused)) void *parsed_result,
		struct cmdline *cl,
		__attribute__((unused)) void *data)
{
	rte_spinlock_trylock(&global_flag_stru_p->lock);
	if (global_flag_stru_p->LcoreMainIsRunning == 0) {
		cmdline_printf(cl,
			"lcore_main not running on core:%d\n",
			global_flag_stru_p->LcoreMainCore);
		rte_spinlock_unlock(&global_flag_stru_p->lock);
		return;
	}
	global_flag_stru_p->LcoreMainIsRunning = 0;
	if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0)
		cmdline_printf(cl,
			"error: lcore_main cannot stop on core:%d\n",
			global_flag_stru_p->LcoreMainCore);
	else
		cmdline_printf(cl,
			"lcore_main stopped on core:%d\n",
			global_flag_stru_p->LcoreMainCore);
	rte_spinlock_unlock(&global_flag_stru_p->lock);
}

cmdline_parse_token_string_t cmd_stop_stop =
	TOKEN_STRING_INITIALIZER(struct cmd_stop_result, stop, "stop");

cmdline_parse_inst_t cmd_stop = {
	.f = cmd_stop_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "this command does not take any arguments",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_stop_stop,
		NULL,
	},
};

struct cmd_quit_result {
	cmdline_fixed_string_t quit;
};

static void cmd_quit_parsed(__attribute__((unused)) void *parsed_result,
		struct cmdline *cl,
		__attribute__((unused)) void *data)
{
	rte_spinlock_trylock(&global_flag_stru_p->lock);
	if (global_flag_stru_p->LcoreMainIsRunning == 0) {
		cmdline_printf(cl,
			"lcore_main not running on core:%d\n",
			global_flag_stru_p->LcoreMainCore);
		rte_spinlock_unlock(&global_flag_stru_p->lock);
		cmdline_quit(cl);
		return;
	}
	global_flag_stru_p->LcoreMainIsRunning = 0;
	if (rte_eal_wait_lcore(global_flag_stru_p->LcoreMainCore) < 0)
		cmdline_printf(cl,
			"error: lcore_main cannot stop on core:%d\n",
			global_flag_stru_p->LcoreMainCore);
	else
		cmdline_printf(cl,
			"lcore_main stopped on core:%d\n",
			global_flag_stru_p->LcoreMainCore);
	rte_spinlock_unlock(&global_flag_stru_p->lock);
	cmdline_quit(cl);
}

cmdline_parse_token_string_t cmd_quit_quit =
	TOKEN_STRING_INITIALIZER(struct cmd_quit_result, quit, "quit");

cmdline_parse_inst_t cmd_quit = {
	.f = cmd_quit_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "this command does not take any arguments",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_quit_quit,
		NULL,
	},
};

struct cmd_show_result {
	cmdline_fixed_string_t show;
};

static void cmd_show_parsed(__attribute__((unused)) void *parsed_result,
		struct cmdline *cl,
		__attribute__((unused)) void *data)
{
	uint16_t slaves[16] = {0};
	uint16_t len = 16;
	struct ether_addr addr;
	uint16_t i = 0;

	while (i < slaves_count) {
		rte_eth_macaddr_get(i, &addr);
		PRINT_MAC(addr);
		printf("\n");
		i++;
	}

	rte_spinlock_trylock(&global_flag_stru_p->lock);
	cmdline_printf(cl,
		"Active_slaves:%d "
		"packets received:Tot:%d Arp:%d IPv4:%d\n",
		rte_eth_bond_active_slaves_get(BOND_PORT, slaves, len),
		global_flag_stru_p->port_packets[0],
		global_flag_stru_p->port_packets[1],
		global_flag_stru_p->port_packets[2]);
	rte_spinlock_unlock(&global_flag_stru_p->lock);
}

cmdline_parse_token_string_t cmd_show_show =
	TOKEN_STRING_INITIALIZER(struct cmd_show_result, show, "show");

cmdline_parse_inst_t cmd_show = {
	.f = cmd_show_parsed,  /* function to call */
	.data = NULL,      /* 2nd arg of func */
	.help_str = "this command does not take any arguments",
	.tokens = {        /* token list, NULL terminated */
		(void *)&cmd_show_show,
		NULL,
	},
};

/****** CONTEXT (list of commands) */

cmdline_parse_ctx_t main_ctx[] = {
	(cmdline_parse_inst_t *)&cmd_start,
	(cmdline_parse_inst_t *)&cmd_obj_send,
	(cmdline_parse_inst_t *)&cmd_stop,
	(cmdline_parse_inst_t *)&cmd_show,
	(cmdline_parse_inst_t *)&cmd_quit,
	(cmdline_parse_inst_t *)&cmd_help,
	NULL,
};

/* prompt function, called from main on MASTER lcore */
static void prompt(__attribute__((unused)) void *arg1)
{
	struct cmdline *cl;
	cl = cmdline_stdin_new(main_ctx, "bond6>");
	if (cl != NULL) {
		cmdline_interact(cl);
		cmdline_stdin_exit(cl);
	}
}

/* Main function, does initialisation and calls the per-lcore functions */
int
main(int argc, char *argv[])
{
	int ret;
	uint16_t nb_ports, i;

	ret = rte_eal_init(argc, argv);
	rte_devargs_dump(stdout);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");

	nb_ports = rte_eth_dev_count_avail();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "Give at least one port\n");
	else if (nb_ports > MAX_PORTS)
		rte_exit(EXIT_FAILURE, "You can have max 4 ports\n");

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NB_MBUF, 32,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
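	/* The single pool above is shared by all slave ports and the bond port:
	 * NB_MBUF (8K) mbufs, a 32-entry per-lcore cache, no private area and
	 * the default data room size. */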

	/* initialize all ports */
	slaves_count = nb_ports;
	RTE_ETH_FOREACH_DEV(i) {
		slave_port_init(i, mbuf_pool);
		slaves[i] = i;
	}

	bond_port_init(mbuf_pool);

	rte_spinlock_init(&global_flag_stru_p->lock);
	int slave_core_id = rte_lcore_id();

	/* check state of lcores */
	RTE_LCORE_FOREACH_SLAVE(slave_core_id) {
		if (lcore_config[slave_core_id].state != WAIT)
			return -EBUSY;
	}

	/* start lcore main on core != master_core - ARP response thread */
	slave_core_id = rte_get_next_lcore(rte_lcore_id(), 1, 0);
	if ((slave_core_id >= RTE_MAX_LCORE) || (slave_core_id == 0))
		return -EPERM;

	global_flag_stru_p->LcoreMainIsRunning = 1;
	global_flag_stru_p->LcoreMainCore = slave_core_id;
	printf("Starting lcore_main on core %d:%d Our IP:%d.%d.%d.%d\n",
			slave_core_id,
			rte_eal_remote_launch((lcore_function_t *)lcore_main,
					NULL, slave_core_id),
			BOND_IP_1, BOND_IP_2, BOND_IP_3, BOND_IP_4);

	/* Start prompt for user interaction */
	prompt(NULL);

	return 0;
}