/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_lpm.h>
#include <rte_string_fns.h>

#include "main.h"
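/*
 * Default port configuration: RSS spreads incoming packets over the RX
 * queues, IP checksum computation is offloaded to the NIC and the CRC is
 * stripped in hardware; the TX side uses no multi-queue mode.
 */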
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_RSS,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
		.hw_strip_crc   = 1, /**< CRC stripped by hardware */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};
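/*
 * Assign each worker lcore a compact ID in the range [0, n_workers); the
 * ID is later used to index the per-worker TX rings.
 */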
static void
app_assign_worker_ids(void)
{
	uint32_t lcore, worker_id;

	/* Assign ID for each worker */
	worker_id = 0;
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		lp_worker->worker_id = worker_id;
		worker_id ++;
	}
}
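/*
 * Create one packet mbuf pool per NUMA socket in use, then point every
 * enabled lcore at the pool that is local to its own socket.
 */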
static void
app_init_mbuf_pools(void)
{
	unsigned socket, lcore;

	/* Init the buffer pools */
	for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
		char name[32];

		if (app_is_socket_used(socket) == 0) {
			continue;
		}

		snprintf(name, sizeof(name), "mbuf_pool_%u", socket);
		printf("Creating the mbuf pool for socket %u ...\n", socket);
		app.pools[socket] = rte_pktmbuf_pool_create(
			name, APP_DEFAULT_MEMPOOL_BUFFERS,
			APP_DEFAULT_MEMPOOL_CACHE_SIZE,
			0, APP_DEFAULT_MBUF_DATA_SIZE, socket);
		if (app.pools[socket] == NULL) {
			rte_panic("Cannot create mbuf pool on socket %u\n", socket);
		}
	}

	/* Assign the socket-local pool to each enabled lcore */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		if (app.lcore_params[lcore].type == e_APP_LCORE_DISABLED) {
			continue;
		}

		socket = rte_lcore_to_socket_id(lcore);
		app.lcore_params[lcore].pool = app.pools[socket];
	}
}
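/*
 * Create one LPM (longest prefix match) table per NUMA socket in use,
 * populate it with the configured routing rules, and point each worker
 * lcore at the table local to its socket.
 */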
static void
app_init_lpm_tables(void)
{
	unsigned socket, lcore;

	/* Init the LPM tables */
	for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
		char name[32];
		uint32_t rule;

		if (app_is_socket_used(socket) == 0) {
			continue;
		}

		struct rte_lpm_config lpm_config;

		lpm_config.max_rules = APP_MAX_LPM_RULES;
		lpm_config.number_tbl8s = 256;
		lpm_config.flags = 0;
		snprintf(name, sizeof(name), "lpm_table_%u", socket);
		printf("Creating the LPM table for socket %u ...\n", socket);
		app.lpm_tables[socket] = rte_lpm_create(
			name,
			socket,
			&lpm_config);
		if (app.lpm_tables[socket] == NULL) {
			rte_panic("Unable to create LPM table on socket %u\n", socket);
		}

		for (rule = 0; rule < app.n_lpm_rules; rule ++) {
			int ret;

			ret = rte_lpm_add(app.lpm_tables[socket],
				app.lpm_rules[rule].ip,
				app.lpm_rules[rule].depth,
				app.lpm_rules[rule].if_out);

			if (ret < 0) {
				rte_panic("Unable to add entry %u (%x/%u => %u) to the LPM table on socket %u (%d)\n",
					(unsigned) rule,
					(unsigned) app.lpm_rules[rule].ip,
					(unsigned) app.lpm_rules[rule].depth,
					(unsigned) app.lpm_rules[rule].if_out,
					socket, ret);
			}
		}
	}

	/* Point each worker at the LPM table local to its socket */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		socket = rte_lcore_to_socket_id(lcore);
		app.lcore_params[lcore].worker.lpm_table = app.lpm_tables[socket];
	}
}
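/*
 * Create one single-producer/single-consumer ring from each I/O RX lcore
 * to each worker lcore, allocated on the socket of the I/O lcore; the two
 * verification loops then check that every expected ring exists.
 */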
static void
app_init_rings_rx(void)
{
	unsigned lcore;

	/* Initialize the rings for the RX side */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
		unsigned socket_io, lcore_worker;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp_io->rx.n_nic_queues == 0)) {
			continue;
		}

		socket_io = rte_lcore_to_socket_id(lcore);

		for (lcore_worker = 0; lcore_worker < APP_MAX_LCORES; lcore_worker ++) {
			char name[32];
			struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore_worker].worker;
			struct rte_ring *ring = NULL;

			if (app.lcore_params[lcore_worker].type != e_APP_LCORE_WORKER) {
				continue;
			}

			printf("Creating ring to connect I/O lcore %u (socket %u) with worker lcore %u ...\n",
				lcore, socket_io, lcore_worker);
			snprintf(name, sizeof(name), "app_ring_rx_s%u_io%u_w%u",
				socket_io, lcore, lcore_worker);
			ring = rte_ring_create(
				name,
				app.ring_rx_size,
				socket_io,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
			if (ring == NULL) {
				rte_panic("Cannot create ring to connect I/O core %u with worker core %u\n",
					lcore, lcore_worker);
			}

			lp_io->rx.rings[lp_io->rx.n_rings] = ring;
			lp_io->rx.n_rings ++;

			lp_worker->rings_in[lp_worker->n_rings_in] = ring;
			lp_worker->n_rings_in ++;
		}
	}

	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp_io->rx.n_nic_queues == 0)) {
			continue;
		}

		if (lp_io->rx.n_rings != app_get_lcores_worker()) {
			rte_panic("Algorithmic error (I/O RX rings)\n");
		}
	}

	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		if (lp_worker->n_rings_in != app_get_lcores_io_rx()) {
			rte_panic("Algorithmic error (worker input rings)\n");
		}
	}
}
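/*
 * Create one single-producer/single-consumer ring from each worker lcore
 * to the I/O lcore that drains each enabled TX port, then verify that
 * every (port, worker) ring slot has been filled.
 */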
static void
app_init_rings_tx(void)
{
	unsigned lcore;

	/* Initialize the rings for the TX side */
	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
		unsigned port;

		if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
			continue;
		}

		for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
			char name[32];
			struct app_lcore_params_io *lp_io = NULL;
			struct rte_ring *ring;
			uint32_t socket_io, lcore_io;

			if (app.nic_tx_port_mask[port] == 0) {
				continue;
			}

			if (app_get_lcore_for_nic_tx(port, &lcore_io) < 0) {
				rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
					port);
			}

			lp_io = &app.lcore_params[lcore_io].io;
			socket_io = rte_lcore_to_socket_id(lcore_io);

			printf("Creating ring to connect worker lcore %u with TX port %u (through I/O lcore %u) (socket %u) ...\n",
				lcore, port, (unsigned)lcore_io, (unsigned)socket_io);
			snprintf(name, sizeof(name), "app_ring_tx_s%u_w%u_p%u", socket_io, lcore, port);
			ring = rte_ring_create(
				name,
				app.ring_tx_size,
				socket_io,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
			if (ring == NULL) {
				rte_panic("Cannot create ring to connect worker core %u with TX port %u\n",
					lcore, port);
			}

			lp_worker->rings_out[port] = ring;
			lp_io->tx.rings[port][lp_worker->worker_id] = ring;
		}
	}

	for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
		struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
		unsigned i;

		if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
		    (lp_io->tx.n_nic_ports == 0)) {
			continue;
		}

		for (i = 0; i < lp_io->tx.n_nic_ports; i ++) {
			unsigned port, j;

			port = lp_io->tx.nic_ports[i];
			for (j = 0; j < app_get_lcores_worker(); j ++) {
				if (lp_io->tx.rings[port][j] == NULL) {
					rte_panic("Algorithmic error (I/O TX rings)\n");
				}
			}
		}
	}
}
/* Check the link status of all enabled ports, waiting up to 9s, then print the final status */
static void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;
	uint32_t n_rx_queues, n_tx_queues;

	printf("\nChecking link status");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			n_rx_queues = app_get_nic_rx_queues_per_port(portid);
			n_tx_queues = app.nic_tx_port_mask[portid];
			if ((n_rx_queues == 0) && (n_tx_queues == 0))
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port%d Link Up - speed %uMbps - %s\n",
						portid, link.link_speed,
						(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
						("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			printf(".");
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
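/*
 * Configure and start every NIC port that has at least one RX or TX queue
 * assigned: configure the device, adjust the descriptor counts, set up
 * each RX queue with the mempool local to its polling lcore, set up TX
 * queue 0, then start the port and wait for the links to come up.
 */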
static void
app_init_nics(void)
{
	unsigned socket;
	uint32_t lcore;
	uint16_t port;
	uint8_t queue;
	int ret;
	uint32_t n_rx_queues, n_tx_queues;

	/* Init NIC ports and queues, then start the ports */
	for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
		struct rte_mempool *pool;
		uint16_t nic_rx_ring_size;
		uint16_t nic_tx_ring_size;

		n_rx_queues = app_get_nic_rx_queues_per_port(port);
		n_tx_queues = app.nic_tx_port_mask[port];

		if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
			continue;
		}

		/* Init port */
		printf("Initializing NIC port %u ...\n", port);
		ret = rte_eth_dev_configure(
			port,
			(uint8_t) n_rx_queues,
			(uint8_t) n_tx_queues,
			&port_conf);
		if (ret < 0) {
			rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
		}
		rte_eth_promiscuous_enable(port);

		nic_rx_ring_size = app.nic_rx_ring_size;
		nic_tx_ring_size = app.nic_tx_ring_size;
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(
			port, &nic_rx_ring_size, &nic_tx_ring_size);
		if (ret < 0) {
			rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
				port, ret);
		}
		app.nic_rx_ring_size = nic_rx_ring_size;
		app.nic_tx_ring_size = nic_tx_ring_size;

		/* Init RX queues */
		for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
			if (app.nic_rx_queue_mask[port][queue] == 0) {
				continue;
			}

			app_get_lcore_for_nic_rx(port, queue, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			pool = app.lcore_params[lcore].pool;

			printf("Initializing NIC port %u RX queue %u ...\n",
				port, queue);
			ret = rte_eth_rx_queue_setup(
				port,
				queue,
				(uint16_t) app.nic_rx_ring_size,
				socket,
				NULL,
				pool);
			if (ret < 0) {
				rte_panic("Cannot init RX queue %u for port %u (%d)\n",
					queue, port, ret);
			}
		}

		/* Init TX queues */
		if (app.nic_tx_port_mask[port] == 1) {
			app_get_lcore_for_nic_tx(port, &lcore);
			socket = rte_lcore_to_socket_id(lcore);
			printf("Initializing NIC port %u TX queue 0 ...\n",
				port);
			ret = rte_eth_tx_queue_setup(
				port,
				0,
				(uint16_t) app.nic_tx_ring_size,
				socket,
				NULL);
			if (ret < 0) {
				rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
					port, ret);
			}
		}

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0) {
			rte_panic("Cannot start port %d (%d)\n", port, ret);
		}
	}

	check_all_ports_link_status(APP_MAX_NIC_PORTS, (~0x0));
}
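/*
 * Top-level initialization, run once before the lcores are launched. The
 * order matters: worker IDs are assigned first because the TX rings
 * created later are indexed by worker_id.
 */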
void
app_init(void)
{
	app_assign_worker_ids();
	app_init_mbuf_pools();
	app_init_lpm_tables();
	app_init_rings_rx();
	app_init_rings_tx();
	app_init_nics();

	printf("Initialization completed.\n");
}