4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 #include <sys/types.h>
41 #include <sys/queue.h>
46 #include <rte_common.h>
47 #include <rte_byteorder.h>
49 #include <rte_memory.h>
50 #include <rte_memcpy.h>
51 #include <rte_memzone.h>
52 #include <rte_tailq.h>
54 #include <rte_per_lcore.h>
55 #include <rte_launch.h>
56 #include <rte_atomic.h>
57 #include <rte_cycles.h>
58 #include <rte_prefetch.h>
59 #include <rte_lcore.h>
60 #include <rte_per_lcore.h>
61 #include <rte_branch_prediction.h>
62 #include <rte_interrupts.h>
64 #include <rte_random.h>
65 #include <rte_debug.h>
66 #include <rte_ether.h>
67 #include <rte_ethdev.h>
69 #include <rte_mempool.h>
71 #include <rte_string_fns.h>
78 static struct rte_eth_conf port_conf = {
81 .header_split = 0, /**< Header Split disabled */
82 .hw_ip_checksum = 1, /**< IP checksum offload enabled */
83 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
84 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
85 .hw_strip_crc = 0, /**< CRC stripped by hardware */
90 .rss_hf = ETH_RSS_IPV4,
94 .mq_mode = ETH_DCB_NONE,
98 static struct rte_eth_rxconf rx_conf = {
100 .pthresh = APP_DEFAULT_NIC_RX_PTHRESH,
101 .hthresh = APP_DEFAULT_NIC_RX_HTHRESH,
102 .wthresh = APP_DEFAULT_NIC_RX_WTHRESH,
104 .rx_free_thresh = APP_DEFAULT_NIC_RX_FREE_THRESH,
107 static struct rte_eth_txconf tx_conf = {
109 .pthresh = APP_DEFAULT_NIC_TX_PTHRESH,
110 .hthresh = APP_DEFAULT_NIC_TX_HTHRESH,
111 .wthresh = APP_DEFAULT_NIC_TX_WTHRESH,
113 .tx_free_thresh = APP_DEFAULT_NIC_TX_FREE_THRESH,
114 .tx_rs_thresh = APP_DEFAULT_NIC_TX_RS_THRESH,
118 app_assign_worker_ids(void)
120 uint32_t lcore, worker_id;
122 /* Assign ID for each worker */
124 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
125 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
127 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
131 lp_worker->worker_id = worker_id;
137 app_init_mbuf_pools(void)
139 uint32_t socket, lcore;
141 /* Init the buffer pools */
142 for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
144 if (app_is_socket_used(socket) == 0) {
148 rte_snprintf(name, sizeof(name), "mbuf_pool_%u", socket);
149 printf("Creating the mbuf pool for socket %u ...\n", socket);
150 app.pools[socket] = rte_mempool_create(
152 APP_DEFAULT_MEMPOOL_BUFFERS,
153 APP_DEFAULT_MBUF_SIZE,
154 APP_DEFAULT_MEMPOOL_CACHE_SIZE,
155 sizeof(struct rte_pktmbuf_pool_private),
156 rte_pktmbuf_pool_init, NULL,
157 rte_pktmbuf_init, NULL,
160 if (app.pools[socket] == NULL) {
161 rte_panic("Cannot create mbuf pool on socket %u\n", socket);
165 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
166 if (app.lcore_params[lcore].type == e_APP_LCORE_DISABLED) {
170 socket = rte_lcore_to_socket_id(lcore);
171 app.lcore_params[lcore].pool = app.pools[socket];
176 app_init_lpm_tables(void)
178 uint32_t socket, lcore;
180 /* Init the LPM tables */
181 for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
185 if (app_is_socket_used(socket) == 0) {
189 rte_snprintf(name, sizeof(name), "lpm_table_%u", socket);
190 printf("Creating the LPM table for socket %u ...\n", socket);
191 app.lpm_tables[socket] = rte_lpm_create(
196 if (app.lpm_tables[socket] == NULL) {
197 rte_panic("Unable to create LPM table on socket %u\n", socket);
200 for (rule = 0; rule < app.n_lpm_rules; rule ++) {
203 ret = rte_lpm_add(app.lpm_tables[socket],
204 app.lpm_rules[rule].ip,
205 app.lpm_rules[rule].depth,
206 app.lpm_rules[rule].if_out);
209 rte_panic("Unable to add entry %u (%x/%u => %u) to the LPM table on socket %u (%d)\n",
210 rule, app.lpm_rules[rule].ip,
211 (uint32_t) app.lpm_rules[rule].depth,
212 (uint32_t) app.lpm_rules[rule].if_out,
220 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
221 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
225 socket = rte_lcore_to_socket_id(lcore);
226 app.lcore_params[lcore].worker.lpm_table = app.lpm_tables[socket];
231 app_init_rings_rx(void)
235 /* Initialize the rings for the RX side */
236 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
237 struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
238 uint32_t socket_io, lcore_worker;
240 if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
241 (lp_io->rx.n_nic_queues == 0)) {
245 socket_io = rte_lcore_to_socket_id(lcore);
247 for (lcore_worker = 0; lcore_worker < APP_MAX_LCORES; lcore_worker ++) {
249 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore_worker].worker;
250 struct rte_ring *ring = NULL;
252 if (app.lcore_params[lcore_worker].type != e_APP_LCORE_WORKER) {
256 printf("Creating ring to connect I/O lcore %u (socket %u) with worker lcore %u ...\n",
260 rte_snprintf(name, sizeof(name), "app_ring_rx_s%u_io%u_w%u",
264 ring = rte_ring_create(
268 RING_F_SP_ENQ | RING_F_SC_DEQ);
270 rte_panic("Cannot create ring to connect I/O core %u with worker core %u\n",
275 lp_io->rx.rings[lp_io->rx.n_rings] = ring;
276 lp_io->rx.n_rings ++;
278 lp_worker->rings_in[lp_worker->n_rings_in] = ring;
279 lp_worker->n_rings_in ++;
283 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
284 struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
286 if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
287 (lp_io->rx.n_nic_queues == 0)) {
291 if (lp_io->rx.n_rings != app_get_lcores_worker()) {
292 rte_panic("Algorithmic error (I/O RX rings)\n");
296 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
297 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
299 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
303 if (lp_worker->n_rings_in != app_get_lcores_io_rx()) {
304 rte_panic("Algorithmic error (worker input rings)\n");
310 app_init_rings_tx(void)
314 /* Initialize the rings for the TX side */
315 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
316 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
319 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
323 for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
325 struct app_lcore_params_io *lp_io = NULL;
326 struct rte_ring *ring;
327 uint32_t socket_io, lcore_io;
329 if (app.nic_tx_port_mask[port] == 0) {
333 if (app_get_lcore_for_nic_tx((uint8_t) port, &lcore_io) < 0) {
334 rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
338 lp_io = &app.lcore_params[lcore_io].io;
339 socket_io = rte_lcore_to_socket_id(lcore_io);
341 printf("Creating ring to connect worker lcore %u with TX port %u (through I/O lcore %u) (socket %u) ...\n",
342 lcore, port, lcore_io, socket_io);
343 rte_snprintf(name, sizeof(name), "app_ring_tx_s%u_w%u_p%u", socket_io, lcore, port);
344 ring = rte_ring_create(
348 RING_F_SP_ENQ | RING_F_SC_DEQ);
350 rte_panic("Cannot create ring to connect worker core %u with TX port %u\n",
355 lp_worker->rings_out[port] = ring;
356 lp_io->tx.rings[port][lp_worker->worker_id] = ring;
360 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
361 struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
364 if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
365 (lp_io->tx.n_nic_ports == 0)) {
369 for (i = 0; i < lp_io->tx.n_nic_ports; i ++){
372 port = lp_io->tx.nic_ports[i];
373 for (j = 0; j < app_get_lcores_worker(); j ++) {
374 if (lp_io->tx.rings[port][j] == NULL) {
375 rte_panic("Algorithmic error (I/O TX rings)\n");
385 uint32_t socket, lcore;
390 printf("Initializing the PMD driver ...\n");
391 if (rte_pmd_init_all() < 0) {
392 rte_panic("Cannot init PMD\n");
395 if (rte_eal_pci_probe() < 0) {
396 rte_panic("Cannot probe PCI\n");
399 /* Init NIC ports and queues, then start the ports */
400 for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
401 struct rte_eth_link link;
402 struct rte_mempool *pool;
403 uint32_t n_rx_queues, n_tx_queues;
405 n_rx_queues = app_get_nic_rx_queues_per_port(port);
406 n_tx_queues = app.nic_tx_port_mask[port];
408 if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
413 printf("Initializing NIC port %u ...\n", (uint32_t) port);
414 ret = rte_eth_dev_configure(
416 (uint8_t) n_rx_queues,
417 (uint8_t) n_tx_queues,
420 rte_panic("Cannot init NIC port %u (%d)\n", (uint32_t) port, ret);
422 rte_eth_promiscuous_enable(port);
425 for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
426 if (app.nic_rx_queue_mask[port][queue] == 0) {
430 app_get_lcore_for_nic_rx(port, queue, &lcore);
431 socket = rte_lcore_to_socket_id(lcore);
432 pool = app.lcore_params[lcore].pool;
434 printf("Initializing NIC port %u RX queue %u ...\n",
437 ret = rte_eth_rx_queue_setup(
440 (uint16_t) app.nic_rx_ring_size,
445 rte_panic("Cannot init RX queue %u for port %u (%d)\n",
453 if (app.nic_tx_port_mask[port] == 1) {
454 app_get_lcore_for_nic_tx(port, &lcore);
455 socket = rte_lcore_to_socket_id(lcore);
456 printf("Initializing NIC port %u TX queue 0 ...\n",
458 ret = rte_eth_tx_queue_setup(
461 (uint16_t) app.nic_tx_ring_size,
465 rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
472 ret = rte_eth_dev_start(port);
474 rte_panic("Cannot start port %d (%d)\n", port, ret);
477 /* Get link status */
478 rte_eth_link_get(port, &link);
479 if (link.link_status) {
480 printf("Port %u is UP (%u Mbps)\n",
482 (unsigned) link.link_speed);
484 printf("Port %u is DOWN\n",
/*
 * Top-level one-shot initialization: worker IDs, mbuf pools, LPM tables,
 * RX/TX rings, and NIC ports, in dependency order. Any failure inside the
 * helpers panics, so returning means init fully succeeded.
 */
void
app_init(void)
{
	app_assign_worker_ids();
	app_init_mbuf_pools();
	app_init_lpm_tables();
	/* NOTE(review): the next three calls fall in a gap of the extracted
	 * listing and were restored from the helpers defined above — confirm
	 * order against the original file. */
	app_init_rings_rx();
	app_init_rings_tx();
	app_init_nics();

	printf("Initialization completed.\n");
}