X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fload_balancer%2Finit.c;h=f2045f235f30f45d20c376fb1e7f655e2408387c;hb=6b2a47de074a0a6abc9a7c8d1aaa2a66898b7b48;hp=12e88870a79022e97fc4389cc366cbc3de06d43f;hpb=af75078fece3615088e561357c1e97603e43a5fe;p=dpdk.git diff --git a/examples/load_balancer/init.c b/examples/load_balancer/init.c index 12e88870a7..f2045f235f 100644 --- a/examples/load_balancer/init.c +++ b/examples/load_balancer/init.c @@ -1,36 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2012 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - * version: DPDK.L.1.2.3-3 +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2014 Intel Corporation */ #include @@ -49,10 +18,7 @@ #include #include #include -#include -#include #include -#include #include #include #include @@ -61,7 +27,6 @@ #include #include #include -#include #include #include #include @@ -78,42 +43,22 @@ static struct rte_eth_conf port_conf = { .rxmode = { + .mq_mode = ETH_MQ_RX_RSS, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 1, /**< IP checksum offload enabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame = 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 0, /**< CRC stripped by hardware */ + .offloads = (DEV_RX_OFFLOAD_CHECKSUM | + DEV_RX_OFFLOAD_CRC_STRIP), }, .rx_adv_conf = { .rss_conf = { .rss_key = NULL, - .rss_hf = ETH_RSS_IPV4, + .rss_hf = ETH_RSS_IP, }, }, .txmode = { + .mq_mode = ETH_MQ_TX_NONE, }, }; -static struct rte_eth_rxconf rx_conf = { - .rx_thresh = { - .pthresh = APP_DEFAULT_NIC_RX_PTHRESH, - .hthresh = APP_DEFAULT_NIC_RX_HTHRESH, - .wthresh = APP_DEFAULT_NIC_RX_WTHRESH, - }, - .rx_free_thresh = APP_DEFAULT_NIC_RX_FREE_THRESH, -}; - -static struct rte_eth_txconf tx_conf = { - .tx_thresh = { - .pthresh = APP_DEFAULT_NIC_TX_PTHRESH, - .hthresh = APP_DEFAULT_NIC_TX_HTHRESH, - .wthresh = APP_DEFAULT_NIC_TX_WTHRESH, - }, - .tx_free_thresh = APP_DEFAULT_NIC_TX_FREE_THRESH, - .tx_rs_thresh = APP_DEFAULT_NIC_TX_RS_THRESH, -}; - static void app_assign_worker_ids(void) { @@ -136,7 +81,7 @@ app_assign_worker_ids(void) static void app_init_mbuf_pools(void) { - uint32_t socket, lcore; + unsigned socket, lcore; /* Init the buffer pools */ for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) { @@ -145,18 +90,12 @@ app_init_mbuf_pools(void) continue; } - rte_snprintf(name, sizeof(name), "mbuf_pool_%u", socket); + snprintf(name, sizeof(name), "mbuf_pool_%u", socket); printf("Creating the mbuf pool for socket %u ...\n", socket); - app.pools[socket] = rte_mempool_create( - name, - APP_DEFAULT_MEMPOOL_BUFFERS, - APP_DEFAULT_MBUF_SIZE, + app.pools[socket] = rte_pktmbuf_pool_create( + name, APP_DEFAULT_MEMPOOL_BUFFERS, APP_DEFAULT_MEMPOOL_CACHE_SIZE, - sizeof(struct rte_pktmbuf_pool_private), - rte_pktmbuf_pool_init, NULL, - rte_pktmbuf_init, NULL, - socket, - 0); + 0, APP_DEFAULT_MBUF_DATA_SIZE, socket); if (app.pools[socket] == NULL) { rte_panic("Cannot create mbuf pool on socket %u\n", socket); } @@ -175,7 +114,7 @@ app_init_mbuf_pools(void) static void app_init_lpm_tables(void) { - uint32_t socket, lcore; + unsigned socket, lcore; /* Init the LPM tables */ for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) { @@ -186,13 +125,17 @@ app_init_lpm_tables(void) continue; } - rte_snprintf(name, sizeof(name), "lpm_table_%u", socket); + struct rte_lpm_config lpm_config; + + lpm_config.max_rules = APP_MAX_LPM_RULES; + lpm_config.number_tbl8s = 256; + lpm_config.flags = 0; + snprintf(name, sizeof(name), "lpm_table_%u", socket); printf("Creating the LPM table for socket %u ...\n", socket); app.lpm_tables[socket] = rte_lpm_create( name, socket, - APP_MAX_LPM_RULES, - RTE_LPM_MEMZONE); + &lpm_config); if (app.lpm_tables[socket] == NULL) { rte_panic("Unable to create LPM table on socket %u\n", socket); } @@ -207,9 +150,10 @@ app_init_lpm_tables(void) if (ret < 0) { rte_panic("Unable to add entry %u (%x/%u => %u) to the LPM table on socket %u (%d)\n", - rule, app.lpm_rules[rule].ip, - (uint32_t) app.lpm_rules[rule].depth, - (uint32_t) app.lpm_rules[rule].if_out, + (unsigned) 
rule, + (unsigned) app.lpm_rules[rule].ip, + (unsigned) app.lpm_rules[rule].depth, + (unsigned) app.lpm_rules[rule].if_out, socket, ret); } @@ -230,12 +174,12 @@ app_init_lpm_tables(void) static void app_init_rings_rx(void) { - uint32_t lcore; + unsigned lcore; /* Initialize the rings for the RX side */ for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io; - uint32_t socket_io, lcore_worker; + unsigned socket_io, lcore_worker; if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) || (lp_io->rx.n_nic_queues == 0)) { @@ -257,7 +201,7 @@ app_init_rings_rx(void) lcore, socket_io, lcore_worker); - rte_snprintf(name, sizeof(name), "app_ring_rx_s%u_io%u_w%u", + snprintf(name, sizeof(name), "app_ring_rx_s%u_io%u_w%u", socket_io, lcore, lcore_worker); @@ -309,12 +253,12 @@ app_init_rings_rx(void) static void app_init_rings_tx(void) { - uint32_t lcore; + unsigned lcore; /* Initialize the rings for the TX side */ for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker; - uint32_t port; + unsigned port; if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) { continue; @@ -330,7 +274,7 @@ app_init_rings_tx(void) continue; } - if (app_get_lcore_for_nic_tx((uint8_t) port, &lcore_io) < 0) { + if (app_get_lcore_for_nic_tx(port, &lcore_io) < 0) { rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n", port); } @@ -339,8 +283,8 @@ app_init_rings_tx(void) socket_io = rte_lcore_to_socket_id(lcore_io); printf("Creating ring to connect worker lcore %u with TX port %u (through I/O lcore %u) (socket %u) ...\n", - lcore, port, lcore_io, socket_io); - rte_snprintf(name, sizeof(name), "app_ring_tx_s%u_w%u_p%u", socket_io, lcore, port); + lcore, port, (unsigned)lcore_io, (unsigned)socket_io); + snprintf(name, sizeof(name), "app_ring_tx_s%u_w%u_p%u", socket_io, lcore, port); ring = rte_ring_create( name, app.ring_tx_size, @@ -359,7 +303,7 @@ app_init_rings_tx(void) for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) { struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io; - uint32_t i; + unsigned i; if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) || (lp_io->tx.n_nic_ports == 0)) { @@ -367,7 +311,7 @@ app_init_rings_tx(void) } for (i = 0; i < lp_io->tx.n_nic_ports; i ++){ - uint32_t port, j; + unsigned port, j; port = lp_io->tx.nic_ports[i]; for (j = 0; j < app_get_lcores_worker(); j ++) { @@ -379,34 +323,85 @@ app_init_rings_tx(void) } } +/* Check the link status of all ports in up to 9s, and print them finally */ static void -app_init_nics(void) +check_all_ports_link_status(uint16_t port_num, uint32_t port_mask) { - uint32_t socket, lcore; - uint8_t port, queue; - int ret; +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */ + uint16_t portid; + uint8_t count, all_ports_up, print_flag = 0; + struct rte_eth_link link; + uint32_t n_rx_queues, n_tx_queues; + + printf("\nChecking link status"); + fflush(stdout); + for (count = 0; count <= MAX_CHECK_TIME; count++) { + all_ports_up = 1; + for (portid = 0; portid < port_num; portid++) { + if ((port_mask & (1 << portid)) == 0) + continue; + n_rx_queues = app_get_nic_rx_queues_per_port(portid); + n_tx_queues = app.nic_tx_port_mask[portid]; + if ((n_rx_queues == 0) && (n_tx_queues == 0)) + continue; + memset(&link, 0, sizeof(link)); + rte_eth_link_get_nowait(portid, &link); + /* print link status if flag set */ + if (print_flag == 1) { + if (link.link_status) + printf( + 
"Port%d Link Up - speed %uMbps - %s\n", + portid, link.link_speed, + (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? + ("full-duplex") : ("half-duplex\n")); + else + printf("Port %d Link Down\n", portid); + continue; + } + /* clear all_ports_up flag if any link down */ + if (link.link_status == ETH_LINK_DOWN) { + all_ports_up = 0; + break; + } + } + /* after finally printing all link status, get out */ + if (print_flag == 1) + break; + + if (all_ports_up == 0) { + printf("."); + fflush(stdout); + rte_delay_ms(CHECK_INTERVAL); + } - /* Init driver */ - printf("Initializing the PMD driver ...\n"); -#ifdef RTE_LIBRTE_IGB_PMD - if (rte_igb_pmd_init() < 0) { - rte_panic("Cannot init IGB PMD\n"); - } -#endif -#ifdef RTE_LIBRTE_IXGBE_PMD - if (rte_ixgbe_pmd_init() < 0) { - rte_panic("Cannot init IXGBE PMD\n"); - } -#endif - if (rte_eal_pci_probe() < 0) { - rte_panic("Cannot probe PCI\n"); + /* set the print_flag if all ports up or timeout */ + if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) { + print_flag = 1; + printf("done\n"); + } } +} + +static void +app_init_nics(void) +{ + unsigned socket; + uint32_t lcore; + uint16_t port; + uint8_t queue; + int ret; + uint32_t n_rx_queues, n_tx_queues; /* Init NIC ports and queues, then start the ports */ for (port = 0; port < APP_MAX_NIC_PORTS; port ++) { - struct rte_eth_link link; struct rte_mempool *pool; - uint32_t n_rx_queues, n_tx_queues; + uint16_t nic_rx_ring_size; + uint16_t nic_tx_ring_size; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; + struct rte_eth_dev_info dev_info; + struct rte_eth_conf local_port_conf = port_conf; n_rx_queues = app_get_nic_rx_queues_per_port(port); n_tx_queues = app.nic_tx_port_mask[port]; @@ -416,17 +411,46 @@ app_init_nics(void) } /* Init port */ - printf("Initializing NIC port %u ...\n", (uint32_t) port); + printf("Initializing NIC port %u ...\n", port); + rte_eth_dev_info_get(port, &dev_info); + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE) + local_port_conf.txmode.offloads |= + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + + local_port_conf.rx_adv_conf.rss_conf.rss_hf &= + dev_info.flow_type_rss_offloads; + if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != + port_conf.rx_adv_conf.rss_conf.rss_hf) { + printf("Port %u modified RSS hash function based on hardware support," + "requested:%#"PRIx64" configured:%#"PRIx64"\n", + port, + port_conf.rx_adv_conf.rss_conf.rss_hf, + local_port_conf.rx_adv_conf.rss_conf.rss_hf); + } + ret = rte_eth_dev_configure( port, (uint8_t) n_rx_queues, (uint8_t) n_tx_queues, - &port_conf); + &local_port_conf); if (ret < 0) { - rte_panic("Cannot init NIC port %u (%d)\n", (uint32_t) port, ret); + rte_panic("Cannot init NIC port %u (%d)\n", port, ret); } rte_eth_promiscuous_enable(port); + nic_rx_ring_size = app.nic_rx_ring_size; + nic_tx_ring_size = app.nic_tx_ring_size; + ret = rte_eth_dev_adjust_nb_rx_tx_desc( + port, &nic_rx_ring_size, &nic_tx_ring_size); + if (ret < 0) { + rte_panic("Cannot adjust number of descriptors for port %u (%d)\n", + port, ret); + } + app.nic_rx_ring_size = nic_rx_ring_size; + app.nic_tx_ring_size = nic_tx_ring_size; + + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = local_port_conf.rxmode.offloads; /* Init RX queues */ for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) { if (app.nic_rx_queue_mask[port][queue] == 0) { @@ -438,35 +462,34 @@ app_init_nics(void) pool = app.lcore_params[lcore].pool; printf("Initializing NIC port %u RX queue %u ...\n", - (uint32_t) port, - (uint32_t) queue); + port, queue); ret = 
rte_eth_rx_queue_setup( port, queue, (uint16_t) app.nic_rx_ring_size, socket, - &rx_conf, + &rxq_conf, pool); if (ret < 0) { rte_panic("Cannot init RX queue %u for port %u (%d)\n", - (uint32_t) queue, - (uint32_t) port, - ret); + queue, port, ret); } } + txq_conf = dev_info.default_txconf; + txq_conf.offloads = local_port_conf.txmode.offloads; /* Init TX queues */ if (app.nic_tx_port_mask[port] == 1) { app_get_lcore_for_nic_tx(port, &lcore); socket = rte_lcore_to_socket_id(lcore); printf("Initializing NIC port %u TX queue 0 ...\n", - (uint32_t) port); + port); ret = rte_eth_tx_queue_setup( port, 0, (uint16_t) app.nic_tx_ring_size, socket, - &tx_conf); + &txq_conf); if (ret < 0) { rte_panic("Cannot init TX queue 0 for port %d (%d)\n", port, @@ -479,18 +502,9 @@ app_init_nics(void) if (ret < 0) { rte_panic("Cannot start port %d (%d)\n", port, ret); } - - /* Get link status */ - rte_eth_link_get(port, &link); - if (link.link_status) { - printf("Port %u is UP (%u Mbps)\n", - (uint32_t) port, - (unsigned) link.link_speed); - } else { - printf("Port %u is DOWN\n", - (uint32_t) port); - } } + + check_all_ports_link_status(APP_MAX_NIC_PORTS, (~0x0)); } void
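
The hunks above move the example from several long-removed DPDK calls to their current replacements. The sketches below show the new calls in isolation; identifiers such as example_create_pool and the literal sizes are illustrative placeholders, not part of the example application.

In app_init_mbuf_pools(), the open-coded rte_mempool_create() call (with rte_pktmbuf_pool_init/rte_pktmbuf_init passed explicitly) is replaced by the rte_pktmbuf_pool_create() helper. A minimal sketch of the new call, assuming placeholder pool parameters rather than the APP_DEFAULT_* values:

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_errno.h>
#include <rte_debug.h>

static struct rte_mempool *
example_create_pool(unsigned int socket_id)
{
	/* name, mbuf count, per-lcore cache size, private area size,
	 * data room size (headroom + payload), NUMA socket */
	struct rte_mempool *mp = rte_pktmbuf_pool_create("example_pool",
			8191, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
			(int)socket_id);

	if (mp == NULL)
		rte_panic("Cannot create mbuf pool: %s\n",
			rte_strerror(rte_errno));
	return mp;
}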
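
In app_init_lpm_tables(), rte_lpm_create() no longer takes a rule count and a flags/memzone argument directly; it takes a struct rte_lpm_config, which the hunk fills with APP_MAX_LPM_RULES and 256 tbl8 groups. A self-contained sketch of the same pattern, with the counts hard-coded for brevity:

#include <rte_lpm.h>

static struct rte_lpm *
example_create_lpm(int socket_id)
{
	/* Mirrors the configuration built in the hunk above;
	 * 1024 stands in for APP_MAX_LPM_RULES. */
	struct rte_lpm_config config = {
		.max_rules = 1024,
		.number_tbl8s = 256,
		.flags = 0,
	};

	return rte_lpm_create("example_lpm", socket_id, &config);
}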
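
In app_init_nics(), each port now starts from a copy of the static port_conf, trims the requested RSS hash types to dev_info.flow_type_rss_offloads, enables DEV_TX_OFFLOAD_MBUF_FAST_FREE when the PMD reports it, and lets rte_eth_dev_adjust_nb_rx_tx_desc() clamp the ring sizes. A condensed sketch of that sequence, assuming a single RX/TX queue pair and placeholder descriptor counts:

#include <stdint.h>
#include <rte_ethdev.h>

static int
example_configure_port(uint16_t port_id, const struct rte_eth_conf *base_conf)
{
	struct rte_eth_conf local_conf = *base_conf;
	struct rte_eth_dev_info dev_info;
	uint16_t nb_rxd = 1024, nb_txd = 1024;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Only request offloads and hash types the device supports. */
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		local_conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	local_conf.rx_adv_conf.rss_conf.rss_hf &=
		dev_info.flow_type_rss_offloads;

	ret = rte_eth_dev_configure(port_id, 1, 1, &local_conf);
	if (ret < 0)
		return ret;

	/* The PMD may round the descriptor counts to its own limits. */
	return rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
}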
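
The RX/TX queue setup likewise drops the static rx_conf/tx_conf tables in favour of the PMD's default_rxconf/default_txconf, carrying the port-level offloads into each queue. A sketch of that pattern for a single queue, with queue index, socket and ring size as placeholders:

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
example_setup_queues(uint16_t port_id, const struct rte_eth_conf *conf,
		struct rte_mempool *pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Start from the driver defaults, then add the port offloads. */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf->rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port_id, 0, 1024, 0, &rxq_conf, pool);
	if (ret < 0)
		return ret;

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = conf->txmode.offloads;
	return rte_eth_tx_queue_setup(port_id, 0, 1024, 0, &txq_conf);
}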