4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/types.h>
41 #include <sys/queue.h>
46 #include <rte_common.h>
47 #include <rte_byteorder.h>
49 #include <rte_memory.h>
50 #include <rte_memcpy.h>
51 #include <rte_memzone.h>
52 #include <rte_tailq.h>
54 #include <rte_per_lcore.h>
55 #include <rte_launch.h>
56 #include <rte_atomic.h>
57 #include <rte_cycles.h>
58 #include <rte_prefetch.h>
59 #include <rte_lcore.h>
60 #include <rte_per_lcore.h>
61 #include <rte_branch_prediction.h>
62 #include <rte_interrupts.h>
64 #include <rte_random.h>
65 #include <rte_debug.h>
66 #include <rte_ether.h>
67 #include <rte_ethdev.h>
69 #include <rte_mempool.h>
71 #include <rte_string_fns.h>
/*
 * Default Ethernet device configuration: RX IP checksum offload enabled,
 * header split / VLAN filtering / jumbo frames disabled, and RSS hashing
 * over IPv4 (ETH_RSS_IPV4) to spread RX traffic across queues.
 * NOTE(review): this listing is missing lines -- the .rxmode/.rx_adv_conf
 * struct nesting and the closing braces are not visible here; initializer
 * tokens kept exactly as found.
 */
78 static struct rte_eth_conf port_conf = {
81 .header_split = 0, /**< Header Split disabled */
82 .hw_ip_checksum = 1, /**< IP checksum offload enabled */
83 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
84 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
85 .hw_strip_crc = 0, /**< CRC stripped by hardware */
90 .rss_hf = ETH_RSS_IPV4,
/*
 * RX queue configuration: prefetch/host/write-back descriptor thresholds
 * and the RX free-descriptor threshold all come from APP_DEFAULT_NIC_RX_*
 * constants defined elsewhere in this application.
 * NOTE(review): the .rx_thresh nesting and closing braces are not visible
 * in this listing.
 */
97 static struct rte_eth_rxconf rx_conf = {
99 .pthresh = APP_DEFAULT_NIC_RX_PTHRESH,
100 .hthresh = APP_DEFAULT_NIC_RX_HTHRESH,
101 .wthresh = APP_DEFAULT_NIC_RX_WTHRESH,
103 .rx_free_thresh = APP_DEFAULT_NIC_RX_FREE_THRESH,
/*
 * TX queue configuration: descriptor thresholds plus the TX free and
 * RS (report-status) thresholds, all from APP_DEFAULT_NIC_TX_* constants
 * defined elsewhere in this application.
 * NOTE(review): the .tx_thresh nesting and closing braces are not visible
 * in this listing.
 */
106 static struct rte_eth_txconf tx_conf = {
108 .pthresh = APP_DEFAULT_NIC_TX_PTHRESH,
109 .hthresh = APP_DEFAULT_NIC_TX_HTHRESH,
110 .wthresh = APP_DEFAULT_NIC_TX_WTHRESH,
112 .tx_free_thresh = APP_DEFAULT_NIC_TX_FREE_THRESH,
113 .tx_rs_thresh = APP_DEFAULT_NIC_TX_RS_THRESH,
/*
 * Assign a sequential worker_id to every lcore whose configured role is
 * e_APP_LCORE_WORKER; non-worker lcores are skipped.
 * NOTE(review): lines are missing from this listing -- the return type,
 * the initialization and increment of worker_id, and the loop's
 * continue/closing braces are not visible; tokens kept exactly as found.
 */
117 app_assign_worker_ids(void)
119 uint32_t lcore, worker_id;
121 /* Assign ID for each worker */
123 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
124 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
126 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
130 lp_worker->worker_id = worker_id;
/*
 * Create one packet mbuf pool per CPU socket that is in use, then point
 * every enabled lcore at the pool of its own socket so buffer accesses
 * stay NUMA-local. Panics (rte_panic, does not return) if a pool cannot
 * be created.
 * NOTE(review): lines are missing from this listing -- the return type,
 * the name[] buffer declaration, some rte_mempool_create() arguments
 * (pool name, socket id, flags), and several braces are not visible;
 * tokens kept exactly as found.
 */
136 app_init_mbuf_pools(void)
138 uint32_t socket, lcore;
140 /* Init the buffer pools */
141 for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
143 if (app_is_socket_used(socket) == 0) {
147 rte_snprintf(name, sizeof(name), "mbuf_pool_%u", socket);
148 printf("Creating the mbuf pool for socket %u ...\n", socket);
149 app.pools[socket] = rte_mempool_create(
151 APP_DEFAULT_MEMPOOL_BUFFERS,
152 APP_DEFAULT_MBUF_SIZE,
153 APP_DEFAULT_MEMPOOL_CACHE_SIZE,
154 sizeof(struct rte_pktmbuf_pool_private),
155 rte_pktmbuf_pool_init, NULL,
156 rte_pktmbuf_init, NULL,
159 if (app.pools[socket] == NULL) {
160 rte_panic("Cannot create mbuf pool on socket %u\n", socket);
/* Map each enabled lcore to the mbuf pool of its own socket. */
164 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
165 if (app.lcore_params[lcore].type == e_APP_LCORE_DISABLED) {
169 socket = rte_lcore_to_socket_id(lcore);
170 app.lcore_params[lcore].pool = app.pools[socket];
/*
 * Create one LPM (longest-prefix-match) routing table per in-use socket,
 * populate each with all configured rules (ip/depth => output interface),
 * then point every worker lcore at the LPM table of its own socket.
 * Panics on table creation or rule insertion failure.
 * NOTE(review): lines are missing from this listing -- the return type,
 * the rule/ret declarations, the rte_lpm_create() arguments (name,
 * socket id, max rules), and several braces are not visible; tokens kept
 * exactly as found.
 */
175 app_init_lpm_tables(void)
177 uint32_t socket, lcore;
179 /* Init the LPM tables */
180 for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
184 if (app_is_socket_used(socket) == 0) {
188 rte_snprintf(name, sizeof(name), "lpm_table_%u", socket);
189 printf("Creating the LPM table for socket %u ...\n", socket);
190 app.lpm_tables[socket] = rte_lpm_create(
195 if (app.lpm_tables[socket] == NULL) {
196 rte_panic("Unable to create LPM table on socket %u\n", socket);
/* Insert every configured route into this socket's table. */
199 for (rule = 0; rule < app.n_lpm_rules; rule ++) {
202 ret = rte_lpm_add(app.lpm_tables[socket],
203 app.lpm_rules[rule].ip,
204 app.lpm_rules[rule].depth,
205 app.lpm_rules[rule].if_out);
208 rte_panic("Unable to add entry %u (%x/%u => %u) to the LPM table on socket %u (%d)\n",
209 rule, app.lpm_rules[rule].ip,
210 (uint32_t) app.lpm_rules[rule].depth,
211 (uint32_t) app.lpm_rules[rule].if_out,
/* Each worker lcore routes against the table on its own socket. */
219 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
220 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
224 socket = rte_lcore_to_socket_id(lcore);
225 app.lcore_params[lcore].worker.lpm_table = app.lpm_tables[socket];
/*
 * Create the software rings that carry packets from the I/O RX lcores to
 * the worker lcores: one single-producer/single-consumer ring per
 * (I/O RX lcore, worker lcore) pair, allocated on the I/O lcore's socket.
 * Afterwards, sanity-check that every I/O RX lcore has exactly one ring
 * per worker and every worker has exactly one ring per I/O RX lcore,
 * panicking on any mismatch.
 * NOTE(review): lines are missing from this listing -- the return type,
 * the lcore/name declarations, the rte_ring_create() size and socket
 * arguments, the printf/rte_snprintf argument lists, and several braces
 * are not visible; tokens kept exactly as found.
 */
230 app_init_rings_rx(void)
234 /* Initialize the rings for the RX side */
235 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
236 struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
237 uint32_t socket_io, lcore_worker;
/* Only I/O lcores that actually own RX NIC queues get rings. */
239 if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
240 (lp_io->rx.n_nic_queues == 0)) {
244 socket_io = rte_lcore_to_socket_id(lcore);
246 for (lcore_worker = 0; lcore_worker < APP_MAX_LCORES; lcore_worker ++) {
248 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore_worker].worker;
249 struct rte_ring *ring = NULL;
251 if (app.lcore_params[lcore_worker].type != e_APP_LCORE_WORKER) {
255 printf("Creating ring to connect I/O lcore %u (socket %u) with worker lcore %u ...\n",
259 rte_snprintf(name, sizeof(name), "app_ring_rx_s%u_io%u_w%u",
263 ring = rte_ring_create(
267 RING_F_SP_ENQ | RING_F_SC_DEQ);
269 rte_panic("Cannot create ring to connect I/O core %u with worker core %u\n",
/* Register the ring on both endpoints: I/O RX output, worker input. */
274 lp_io->rx.rings[lp_io->rx.n_rings] = ring;
275 lp_io->rx.n_rings ++;
277 lp_worker->rings_in[lp_worker->n_rings_in] = ring;
278 lp_worker->n_rings_in ++;
/* Sanity check: each I/O RX lcore must have one ring per worker. */
282 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
283 struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
285 if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
286 (lp_io->rx.n_nic_queues == 0)) {
290 if (lp_io->rx.n_rings != app_get_lcores_worker()) {
291 rte_panic("Algorithmic error (I/O RX rings)\n");
/* Sanity check: each worker must have one input ring per I/O RX lcore. */
295 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
296 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
298 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
302 if (lp_worker->n_rings_in != app_get_lcores_io_rx()) {
303 rte_panic("Algorithmic error (worker input rings)\n");
/*
 * Create the software rings that carry packets from worker lcores toward
 * the NIC TX ports: for each worker and each enabled TX port, one
 * single-producer/single-consumer ring placed on the socket of the I/O
 * lcore that handles that port's TX. Afterwards, verify that every
 * (TX port, worker) slot of every I/O TX lcore got a ring, panicking on
 * any gap.
 * NOTE(review): lines are missing from this listing -- the return type,
 * the lcore/port/name/i/j declarations, the rte_ring_create() size and
 * socket arguments, and several braces are not visible; tokens kept
 * exactly as found.
 */
309 app_init_rings_tx(void)
313 /* Initialize the rings for the TX side */
314 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
315 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
318 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
322 for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
324 struct app_lcore_params_io *lp_io = NULL;
325 struct rte_ring *ring;
326 uint32_t socket_io, lcore_io;
328 if (app.nic_tx_port_mask[port] == 0) {
/* Every enabled TX port must have an owning I/O lcore. */
332 if (app_get_lcore_for_nic_tx((uint8_t) port, &lcore_io) < 0) {
333 rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
337 lp_io = &app.lcore_params[lcore_io].io;
338 socket_io = rte_lcore_to_socket_id(lcore_io);
340 printf("Creating ring to connect worker lcore %u with TX port %u (through I/O lcore %u) (socket %u) ...\n",
341 lcore, port, lcore_io, socket_io);
342 rte_snprintf(name, sizeof(name), "app_ring_tx_s%u_w%u_p%u", socket_io, lcore, port);
343 ring = rte_ring_create(
347 RING_F_SP_ENQ | RING_F_SC_DEQ);
349 rte_panic("Cannot create ring to connect worker core %u with TX port %u\n",
/* Register the ring on both endpoints: worker output, I/O TX input. */
354 lp_worker->rings_out[port] = ring;
355 lp_io->tx.rings[port][lp_worker->worker_id] = ring;
/* Sanity check: every (port, worker) slot of each I/O TX lcore is wired. */
359 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
360 struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
363 if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
364 (lp_io->tx.n_nic_ports == 0)) {
368 for (i = 0; i < lp_io->tx.n_nic_ports; i ++){
371 port = lp_io->tx.nic_ports[i];
372 for (j = 0; j < app_get_lcores_worker(); j ++) {
373 if (lp_io->tx.rings[port][j] == NULL) {
374 rte_panic("Algorithmic error (I/O TX rings)\n");
/*
 * NIC initialization (the enclosing function signature is not visible in
 * this listing -- presumably app_init_nics(void); verify against the
 * original file). Sequence: register the poll-mode drivers (IGB/IXGBE,
 * compile-time gated), probe the PCI bus, then for every port with at
 * least one RX queue or TX enabled: configure the device, enable
 * promiscuous mode, set up each enabled RX queue on the mbuf pool of the
 * lcore that polls it, set up TX queue 0 on the socket of the TX I/O
 * lcore, start the port, and print its link status. Panics on any
 * failure.
 * NOTE(review): many lines are elided here -- the port/queue/ret
 * declarations, continue statements, closing braces, and several argument
 * lists (rte_eth_dev_configure's port/conf arguments, queue ids, socket
 * ids, rx_conf/tx_conf pointers) are not visible; tokens kept exactly as
 * found.
 */
384 uint32_t socket, lcore;
389 printf("Initializing the PMD driver ...\n");
390 #ifdef RTE_LIBRTE_IGB_PMD
391 if (rte_igb_pmd_init() < 0) {
392 rte_panic("Cannot init IGB PMD\n");
395 #ifdef RTE_LIBRTE_IXGBE_PMD
396 if (rte_ixgbe_pmd_init() < 0) {
397 rte_panic("Cannot init IXGBE PMD\n");
400 if (rte_eal_pci_probe() < 0) {
401 rte_panic("Cannot probe PCI\n");
404 /* Init NIC ports and queues, then start the ports */
405 for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
406 struct rte_eth_link link;
407 struct rte_mempool *pool;
408 uint32_t n_rx_queues, n_tx_queues;
410 n_rx_queues = app_get_nic_rx_queues_per_port(port);
411 n_tx_queues = app.nic_tx_port_mask[port];
/* Skip ports that are neither RX- nor TX-enabled in the config. */
413 if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
418 printf("Initializing NIC port %u ...\n", (uint32_t) port);
419 ret = rte_eth_dev_configure(
421 (uint8_t) n_rx_queues,
422 (uint8_t) n_tx_queues,
425 rte_panic("Cannot init NIC port %u (%d)\n", (uint32_t) port, ret);
427 rte_eth_promiscuous_enable(port);
/* Set up each enabled RX queue, NUMA-local to its polling lcore. */
430 for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
431 if (app.nic_rx_queue_mask[port][queue] == 0) {
435 app_get_lcore_for_nic_rx(port, queue, &lcore);
436 socket = rte_lcore_to_socket_id(lcore);
437 pool = app.lcore_params[lcore].pool;
439 printf("Initializing NIC port %u RX queue %u ...\n",
442 ret = rte_eth_rx_queue_setup(
445 (uint16_t) app.nic_rx_ring_size,
450 rte_panic("Cannot init RX queue %u for port %u (%d)\n",
/* Set up TX queue 0 on the socket of the port's TX I/O lcore. */
458 if (app.nic_tx_port_mask[port] == 1) {
459 app_get_lcore_for_nic_tx(port, &lcore);
460 socket = rte_lcore_to_socket_id(lcore);
461 printf("Initializing NIC port %u TX queue 0 ...\n",
463 ret = rte_eth_tx_queue_setup(
466 (uint16_t) app.nic_tx_ring_size,
470 rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
477 ret = rte_eth_dev_start(port);
479 rte_panic("Cannot start port %d (%d)\n", port, ret);
482 /* Get link status */
483 rte_eth_link_get(port, &link);
484 if (link.link_status) {
485 printf("Port %u is UP (%u Mbps)\n",
487 (unsigned) link.link_speed);
489 printf("Port %u is DOWN\n",
/*
 * Top-level initialization sequence fragment (the enclosing function
 * signature is not visible in this listing -- presumably app_init(void);
 * verify against the original file). Order matters: worker ids before
 * pools/tables, since later steps index per-worker state. The rings/NIC
 * init calls that presumably sit between these lines and the final
 * printf are also elided here.
 */
498 app_assign_worker_ids();
499 app_init_mbuf_pools();
500 app_init_lpm_tables();
505 printf("Initialization completed.\n");