/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <getopt.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_tailq.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_fbk_hash.h>
#include <rte_ip.h>

#include "main.h"
#define RTE_LOGTYPE_IPv4_MULTICAST RTE_LOGTYPE_USER1

#define MAX_PORTS 16

#define MCAST_CLONE_PORTS 2
#define MCAST_CLONE_SEGS 2

#define PKT_MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_PKT_MBUF 8192

#define HDR_MBUF_SIZE (sizeof(struct rte_mbuf) + 2 * RTE_PKTMBUF_HEADROOM)
#define NB_HDR_MBUF (NB_PKT_MBUF * MAX_PORTS)

#define CLONE_MBUF_SIZE (sizeof(struct rte_mbuf))
#define NB_CLONE_MBUF (NB_PKT_MBUF * MCAST_CLONE_PORTS * MCAST_CLONE_SEGS * 2)
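/*
 * Sizing note on the pools defined above: header mbufs carry no packet
 * data of their own (only headroom for the new Ethernet/IPv4 headers),
 * and clone mbufs are pure metadata attached to existing data segments,
 * so both use a much smaller element size than the packet pool. The
 * counts assume, roughly, one header mbuf per output port per packet
 * and one clone mbuf per cloned segment in the worst case.
 */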
/* allow max jumbo frame 9.5 KB (0x2600 = 9728 bytes) */
#define JUMBO_FRAME_MAX_SIZE 0x2600

#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
/*
 * Construct Ethernet multicast address from IPv4 multicast address.
 * Citing RFC 1112, section 6.4:
 * "An IP host group address is mapped to an Ethernet multicast address
 * by placing the low-order 23-bits of the IP address into the low-order
 * 23 bits of the Ethernet multicast address 01-00-5E-00-00-00 (hex)."
 */
#define ETHER_ADDR_FOR_IPV4_MCAST(x) \
	(rte_cpu_to_be_64(0x01005e000000ULL | ((x) & 0x7fffff)) >> 16)
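/*
 * Worked example of the mapping above: group 224.0.0.105 is 0xe0000069,
 * whose low-order 23 bits are 0x000069, so the destination MAC becomes
 * 01:00:5e:00:00:69. On a little-endian host, the byte swap plus the
 * 16-bit shift leave those six bytes at the start of the returned 64-bit
 * value, ready to be copied into a struct ether_addr.
 */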
/*
 * Configurable number of RX/TX ring descriptors
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
static uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
static uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
/* ethernet addresses of ports */
static struct ether_addr ports_eth_addr[MAX_PORTS];

/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

static uint8_t nb_ports = 0;

static int rx_queue_per_lcore = 1;

struct mbuf_table {
	uint16_t len;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};

#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16
struct lcore_queue_conf {
	uint64_t tx_tsc;
	uint16_t n_rx_queue;
	uint8_t rx_queue_list[MAX_RX_QUEUE_PER_LCORE];
	uint16_t tx_queue_id[MAX_PORTS];
	struct mbuf_table tx_mbufs[MAX_PORTS];
} __rte_cache_aligned;
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 1, /**< Jumbo Frame Support enabled */
		.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};

static struct rte_mempool *packet_pool, *header_pool, *clone_pool;
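/*
 * The three pools play distinct roles: packet_pool holds full received
 * frames, header_pool supplies the small mbufs that carry the newly
 * written Ethernet/IPv4 headers for each outgoing copy, and clone_pool
 * supplies the metadata-only mbufs that rte_pktmbuf_clone() attaches to
 * the shared data segments.
 */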
static struct rte_fbk_hash_params mcast_hash_params = {
	.name = "MCAST_HASH",
	.entries = 1024,
	.entries_per_bucket = 4,
	.socket_id = 0,
	.hash_func = NULL,
	.init_val = 0,
};

struct rte_fbk_hash_table *mcast_hash = NULL;
struct mcast_group_params {
	uint32_t ip;
	uint16_t port_mask;
};

static struct mcast_group_params mcast_group_table[] = {
	{IPv4(224,0,0,101), 0x1},
	{IPv4(224,0,0,102), 0x2},
	{IPv4(224,0,0,103), 0x3},
	{IPv4(224,0,0,104), 0x4},
	{IPv4(224,0,0,105), 0x5},
	{IPv4(224,0,0,106), 0x6},
	{IPv4(224,0,0,107), 0x7},
	{IPv4(224,0,0,108), 0x8},
	{IPv4(224,0,0,109), 0x9},
	{IPv4(224,0,0,110), 0xA},
	{IPv4(224,0,0,111), 0xB},
	{IPv4(224,0,0,112), 0xC},
	{IPv4(224,0,0,113), 0xD},
	{IPv4(224,0,0,114), 0xE},
	{IPv4(224,0,0,115), 0xF},
};

#define N_MCAST_GROUPS \
	(sizeof (mcast_group_table) / sizeof (mcast_group_table[0]))
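/*
 * N_MCAST_GROUPS is the classic element-count idiom; rte_common.h
 * provides the equivalent RTE_DIM() macro for the same computation.
 */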
/* Send burst of packets on an output interface */
static void
send_burst(struct lcore_queue_conf *qconf, uint8_t port)
{
	struct rte_mbuf **m_table;
	uint16_t n, queueid;
	int ret;

	queueid = qconf->tx_queue_id[port];
	m_table = (struct rte_mbuf **)qconf->tx_mbufs[port].m_table;
	n = qconf->tx_mbufs[port].len;

	ret = rte_eth_tx_burst(port, queueid, m_table, n);

	/* free (drop) any mbufs the PMD could not enqueue */
	while (unlikely (ret < n)) {
		rte_pktmbuf_free(m_table[ret]);
		ret++;
	}

	qconf->tx_mbufs[port].len = 0;
}
/* Get number of bits set. */
static inline uint32_t
bitcnt(uint32_t v)
{
	uint32_t n;

	for (n = 0; v != 0; v &= v - 1, n++)
		;

	return (n);
}
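/*
 * This is Kernighan's population-count loop: each "v &= v - 1" clears
 * the lowest set bit, so the loop body runs once per set bit. With
 * GCC/Clang, __builtin_popcount(v) computes the same result.
 */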
/*
 * Create the output multicast packet based on the given input packet.
 * There are two approaches for creating the outgoing packet. Both are
 * based on the zero-copy idea for packet data, but they differ in a few
 * details:
 * The first approach creates a clone of the input packet, i.e. it walks
 * through all segments of the input packet and, for each of them, creates
 * a new mbuf and attaches that new mbuf to the segment (refer to
 * rte_pktmbuf_clone() for more details). A new mbuf is then allocated for
 * the packet header and is prepended to the 'clone' mbuf.
 * The second approach doesn't make a clone; it just increments the refcnt
 * of all input packet segments, allocates a new mbuf for the packet header
 * and prepends it to the input packet.
 * Basically, the first approach reuses only the input packet's data, but
 * creates its own copy of the packet's metadata. The second approach
 * reuses both the input packet's data and its metadata.
 * The advantage of the first approach is that each outgoing packet has
 * its own copy of the metadata, so we can safely modify the data pointer
 * of the input packet. That allows us to skip creation of an output
 * packet for the last destination port and instead modify the input
 * packet's header in place; i.e. for N destination ports we need to
 * invoke mcast_out_pkt() only (N-1) times.
 * The advantage of the second approach is less work per outgoing packet:
 * the "clone" operation is skipped completely. It comes with a price,
 * though - the input packet's metadata has to stay intact, so for N
 * destination ports we need to invoke mcast_out_pkt() N times.
 * Therefore, for a small number of outgoing ports (and segments in the
 * input packet) the first approach is faster. As the number of outgoing
 * ports (and/or input segments) grows, the second approach becomes
 * preferable.
 *
 * @param pkt
 *  Input packet mbuf.
 * @param use_clone
 *  Controls which of the two approaches described above is used:
 *  - 0 - use the second approach:
 *    Don't "clone" the input packet.
 *    Prepend the new header directly to the input packet.
 *  - 1 - use the first approach:
 *    Make a "clone" of the input packet first.
 *    Prepend the new header to the clone of the input packet.
 * @return
 *  - The pointer to the new outgoing packet.
 *  - NULL if the operation failed.
 */
static inline struct rte_mbuf *
mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
{
	struct rte_mbuf *hdr;

	/* Create new mbuf for the header. */
	if (unlikely ((hdr = rte_pktmbuf_alloc(header_pool)) == NULL))
		return (NULL);

	/* If requested, then make a new clone packet. */
	if (use_clone != 0 &&
	    unlikely ((pkt = rte_pktmbuf_clone(pkt, clone_pool)) == NULL)) {
		rte_pktmbuf_free(hdr);
		return (NULL);
	}

	/* prepend new header */
	hdr->next = pkt;

	/* update header's fields */
	hdr->pkt_len = (uint16_t)(hdr->data_len + pkt->pkt_len);
	hdr->nb_segs = (uint8_t)(pkt->nb_segs + 1);

	/* copy metadata from source packet */
	hdr->port = pkt->port;
	hdr->vlan_tci = pkt->vlan_tci;
	hdr->l2_l3_len = pkt->l2_l3_len;
	hdr->hash = pkt->hash;

	hdr->ol_flags = pkt->ol_flags;

	__rte_mbuf_sanity_check(hdr, 1);

	return (hdr);
}
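/*
 * Sketch of the resulting chain for use_clone == 1 with a two-segment
 * input packet:
 *
 *   hdr (new L2/L3 headers) -> clone seg0 -> clone seg1
 *                                   |             |
 *                                   v             v
 *                               pkt seg0      pkt seg1   (data, refcnt++)
 *
 * For use_clone == 0 the header mbuf is chained directly onto the
 * original segments instead.
 */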
/*
 * Write new Ethernet header to the outgoing packet,
 * and put it into the outgoing queue for the given port.
 */
static inline void
mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr,
		struct lcore_queue_conf *qconf, uint8_t port)
{
	struct ether_hdr *ethdr;
	uint16_t len;

	/* Construct Ethernet header. */
	ethdr = (struct ether_hdr *)
		rte_pktmbuf_prepend(pkt, (uint16_t)sizeof(*ethdr));
	RTE_MBUF_ASSERT(ethdr != NULL);

	ether_addr_copy(dest_addr, &ethdr->d_addr);
	ether_addr_copy(&ports_eth_addr[port], &ethdr->s_addr);
	ethdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);

	/* Put new packet into the output queue */
	len = qconf->tx_mbufs[port].len;
	qconf->tx_mbufs[port].m_table[len] = pkt;
	qconf->tx_mbufs[port].len = ++len;

	/* Transmit packets */
	if (unlikely(MAX_PKT_BURST == len))
		send_burst(qconf, port);
}
/* Multicast forward of the input packet */
static inline void
mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
{
	struct rte_mbuf *mc;
	struct ipv4_hdr *iphdr;
	uint32_t dest_addr, port_mask, port_num, use_clone;
	int32_t hash;
	uint8_t port;
	union {
		uint64_t as_int;
		struct ether_addr as_addr;
	} dst_eth_addr;

	/* Remove the Ethernet header from the input packet */
	iphdr = (struct ipv4_hdr *)
		rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
	RTE_MBUF_ASSERT(iphdr != NULL);

	dest_addr = rte_be_to_cpu_32(iphdr->dst_addr);

	/*
	 * Check that it is a valid multicast address and
	 * we have some active ports assigned to it.
	 */
	if (!IS_IPV4_MCAST(dest_addr) ||
	    (hash = rte_fbk_hash_lookup(mcast_hash, dest_addr)) <= 0 ||
	    (port_mask = hash & enabled_port_mask) == 0) {
		rte_pktmbuf_free(m);
		return;
	}

	/* Calculate number of destination ports. */
	port_num = bitcnt(port_mask);

	/* Should we use rte_pktmbuf_clone() or not. */
	use_clone = (port_num <= MCAST_CLONE_PORTS &&
	    m->nb_segs <= MCAST_CLONE_SEGS);

	/* Mark all packet's segments as referenced port_num times */
	if (use_clone == 0)
		rte_pktmbuf_refcnt_update(m, (uint16_t)port_num);

	/* construct destination ethernet address */
	dst_eth_addr.as_int = ETHER_ADDR_FOR_IPV4_MCAST(dest_addr);

	/*
	 * Loop exit condition: with use_clone == 1 we stop when only the
	 * last destination port remains (port_mask == 1); with
	 * use_clone == 0 we stop when all ports are served (port_mask == 0).
	 */
	for (port = 0; use_clone != port_mask; port_mask >>= 1, port++) {

		/* Prepare output packet and send it out. */
		if ((port_mask & 1) != 0) {
			if (likely ((mc = mcast_out_pkt(m, use_clone)) != NULL))
				mcast_send_pkt(mc, &dst_eth_addr.as_addr,
						qconf, port);
			else if (use_clone == 0)
				rte_pktmbuf_free(m);
		}
	}

	/*
	 * If we are making clone packets, then, for the last destination
	 * port, we can overwrite the input packet's metadata.
	 */
	if (use_clone != 0)
		mcast_send_pkt(m, &dst_eth_addr.as_addr, qconf, port);
	else
		rte_pktmbuf_free(m);
}
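/*
 * Example of the forwarding decision above, assuming ports 0 and 1 are
 * enabled: a packet to 224.0.0.103 hits the hash entry with port_mask
 * 0x3. With two destination ports and a single-segment packet,
 * use_clone is 1, so mcast_out_pkt() builds one clone-based copy for
 * port 0, and the original mbuf itself is re-headered and sent on
 * port 1.
 */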
/* Send burst of outgoing packets, if the timeout expires. */
static inline void
send_timeout_burst(struct lcore_queue_conf *qconf)
{
	uint64_t cur_tsc;
	uint8_t portid;
	const uint64_t drain_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;

	cur_tsc = rte_rdtsc();
	if (likely (cur_tsc < qconf->tx_tsc + drain_tsc))
		return;

	for (portid = 0; portid < MAX_PORTS; portid++) {
		if (qconf->tx_mbufs[portid].len != 0)
			send_burst(qconf, portid);
	}
	qconf->tx_tsc = cur_tsc;
}
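/*
 * drain_tsc arithmetic, for illustration: on a CPU with a 2 GHz TSC,
 * (2e9 + 1e6 - 1) / 1e6 * 100 gives 200,000 cycles, i.e. partially
 * filled TX buffers are flushed at most ~100 us after the last burst.
 */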
/* main processing loop */
static int
main_loop(__rte_unused void *dummy)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	unsigned lcore_id;
	int i, j, nb_rx;
	uint8_t portid;
	struct lcore_queue_conf *qconf;

	lcore_id = rte_lcore_id();
	qconf = &lcore_queue_conf[lcore_id];

	if (qconf->n_rx_queue == 0) {
		RTE_LOG(INFO, IPv4_MULTICAST, "lcore %u has nothing to do\n",
		    lcore_id);
		return 0;
	}

	RTE_LOG(INFO, IPv4_MULTICAST, "entering main loop on lcore %u\n",
	    lcore_id);

	for (i = 0; i < qconf->n_rx_queue; i++) {

		portid = qconf->rx_queue_list[i];
		RTE_LOG(INFO, IPv4_MULTICAST, " -- lcoreid=%u portid=%d\n",
		    lcore_id, (int) portid);
	}

	while (1) {

		/*
		 * Read packet from RX queues
		 */
		for (i = 0; i < qconf->n_rx_queue; i++) {

			portid = qconf->rx_queue_list[i];
			nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
						 MAX_PKT_BURST);

			/* Prefetch first packets */
			for (j = 0; j < PREFETCH_OFFSET && j < nb_rx; j++) {
				rte_prefetch0(rte_pktmbuf_mtod(
						pkts_burst[j], void *));
			}

			/* Prefetch and forward already prefetched packets */
			for (j = 0; j < (nb_rx - PREFETCH_OFFSET); j++) {
				rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[
						j + PREFETCH_OFFSET], void *));
				mcast_forward(pkts_burst[j], qconf);
			}

			/* Forward remaining prefetched packets */
			for (; j < nb_rx; j++) {
				mcast_forward(pkts_burst[j], qconf);
			}
		}

		/* Send out packets from TX queues */
		send_timeout_burst(qconf);
	}
}
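/*
 * The three-stage RX loop above is the usual DPDK software-pipelining
 * pattern. For example, with PREFETCH_OFFSET = 3 and a burst of 8
 * packets: packets 0-2 are prefetched up front, packets 0-4 are each
 * processed while packet j+3 is being prefetched, and packets 5-7 are
 * then processed from (hopefully) warm cache.
 */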
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK [-q NQ]\n"
	    "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
	    "  -q NQ: number of queues (=ports) per lcore (default is 1)\n",
	    prgname);
}

static uint32_t
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return ((uint32_t)pm);
}
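/*
 * For instance, "-p 0x3" enables ports 0 and 1, while "-p 5" (hex 0x5)
 * enables ports 0 and 2. An empty or non-hexadecimal argument yields 0,
 * which the caller rejects as an invalid portmask.
 */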
static int
parse_nqueue(const char *q_arg)
{
	char *end = NULL;
	unsigned long n;

	/* parse numerical string */
	errno = 0;
	n = strtoul(q_arg, &end, 0);
	if (errno != 0 || end == NULL || *end != '\0' ||
	    n == 0 || n >= MAX_RX_QUEUE_PER_LCORE)
		return (-1);

	return (n);
}
/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, ret;
	char **argvopt = argv;
	int option_index;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};

	while ((opt = getopt_long(argc, argvopt, "p:q:",
			lgopts, &option_index)) != EOF) {

		switch (opt) {
		/* portmask */
		case 'p':
			enabled_port_mask = parse_portmask(optarg);
			if (enabled_port_mask == 0) {
				printf("invalid portmask\n");
				print_usage(prgname);
				return -1;
			}
			break;

		/* nqueue */
		case 'q':
			rx_queue_per_lcore = parse_nqueue(optarg);
			if (rx_queue_per_lcore < 0) {
				printf("invalid queue number\n");
				print_usage(prgname);
				return -1;
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind >= 0)
		argv[optind - 1] = prgname;

	ret = optind - 1;
	optind = 0; /* reset getopt lib */
	return ret;
}
static void
print_ethaddr(const char *name, struct ether_addr *eth_addr)
{
	printf("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
		eth_addr->addr_bytes[0],
		eth_addr->addr_bytes[1],
		eth_addr->addr_bytes[2],
		eth_addr->addr_bytes[3],
		eth_addr->addr_bytes[4],
		eth_addr->addr_bytes[5]);
}
static int
init_mcast_hash(void)
{
	uint32_t i;

	mcast_hash_params.socket_id = rte_socket_id();
	mcast_hash = rte_fbk_hash_create(&mcast_hash_params);
	if (mcast_hash == NULL) {
		return -1;
	}

	for (i = 0; i < N_MCAST_GROUPS; i++) {
		if (rte_fbk_hash_add_key(mcast_hash,
				mcast_group_table[i].ip,
				mcast_group_table[i].port_mask) < 0) {
			return -1;
		}
	}

	return 0;
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == 0) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
int
MAIN(int argc, char **argv)
{
	struct lcore_queue_conf *qconf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf *txconf;
	int ret;
	uint16_t queueid;
	unsigned lcore_id = 0, rx_lcore_id = 0;
	uint32_t n_tx_queue, nb_lcores;
	uint8_t portid;

	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	/* parse application arguments (after the EAL ones) */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid IPV4_MULTICAST parameters\n");
	/* create the mbuf pools */
	packet_pool = rte_mempool_create("packet_pool", NB_PKT_MBUF,
	    PKT_MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private),
	    rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
	    rte_socket_id(), 0);

	if (packet_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init packet mbuf pool\n");

	header_pool = rte_mempool_create("header_pool", NB_HDR_MBUF,
	    HDR_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL,
	    rte_socket_id(), 0);

	if (header_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init header mbuf pool\n");

	clone_pool = rte_mempool_create("clone_pool", NB_CLONE_MBUF,
	    CLONE_MBUF_SIZE, 32, 0, NULL, NULL, rte_pktmbuf_init, NULL,
	    rte_socket_id(), 0);

	if (clone_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot init clone mbuf pool\n");
	nb_ports = rte_eth_dev_count();
	if (nb_ports == 0)
		rte_exit(EXIT_FAILURE, "No physical ports!\n");
	if (nb_ports > MAX_PORTS)
		nb_ports = MAX_PORTS;

	nb_lcores = rte_lcore_count();
	/* initialize all ports */
	for (portid = 0; portid < nb_ports; portid++) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			printf("Skipping disabled port %d\n", portid);
			continue;
		}

		qconf = &lcore_queue_conf[rx_lcore_id];

		/* get the lcore_id for this port */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
		       qconf->n_rx_queue == (unsigned)rx_queue_per_lcore) {

			rx_lcore_id++;
			qconf = &lcore_queue_conf[rx_lcore_id];

			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");
		}
		qconf->rx_queue_list[qconf->n_rx_queue] = portid;
		qconf->n_rx_queue++;
		/* init port */
		printf("Initializing port %d on lcore %u... ", portid,
		       rx_lcore_id);
		fflush(stdout);

		n_tx_queue = nb_lcores;
		if (n_tx_queue > MAX_TX_QUEUE_PER_PORT)
			n_tx_queue = MAX_TX_QUEUE_PER_PORT;
		ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue,
					    &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n",
				 ret, portid);

		rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
		print_ethaddr(" Address:", &ports_eth_addr[portid]);
		printf(", ");

		/* init one RX queue */
		queueid = 0;
		printf("rxq=%hu ", queueid);
		fflush(stdout);
		ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
					     rte_eth_dev_socket_id(portid),
					     NULL, packet_pool);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%d\n",
				 ret, portid);
		/* init one TX queue per couple (lcore,port) */
		queueid = 0;

		RTE_LCORE_FOREACH(lcore_id) {
			if (rte_lcore_is_enabled(lcore_id) == 0)
				continue;
			printf("txq=%u,%hu ", lcore_id, queueid);
			fflush(stdout);

			rte_eth_dev_info_get(portid, &dev_info);
			txconf = &dev_info.default_txconf;
			txconf->txq_flags = 0;
			ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
					rte_lcore_to_socket_id(lcore_id),
					txconf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
					 "port=%d\n", ret, portid);

			qconf = &lcore_queue_conf[lcore_id];
			qconf->tx_queue_id[portid] = queueid;
			queueid++;
		}
		/* Start device */
		ret = rte_eth_dev_start(portid);
		if (ret < 0)
			rte_exit(EXIT_FAILURE, "rte_eth_dev_start: err=%d, port=%d\n",
				 ret, portid);

		printf("done:\n");
	}

	check_all_ports_link_status(nb_ports, enabled_port_mask);

	/* initialize the multicast hash */
	int retval = init_mcast_hash();
	if (retval != 0)
		rte_exit(EXIT_FAILURE, "Cannot build the multicast hash\n");
	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}
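/*
 * Typical invocation for this example (EAL options before "--",
 * application options after), e.g. two lcores, two memory channels,
 * ports 0 and 1 enabled, one RX queue per lcore:
 *
 *   ./build/ipv4_multicast -c 0x3 -n 2 -- -p 0x3 -q 1
 */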