examples/load_balancer/init.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <string.h>
#include <sys/queue.h>
#include <stdarg.h>
#include <errno.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_lpm.h>

#include "main.h"

static struct rte_eth_conf port_conf = {
        .rxmode = {
                .split_hdr_size = 0,
                .header_split   = 0, /**< Header Split disabled */
                .hw_ip_checksum = 1, /**< IP checksum offload enabled */
                .hw_vlan_filter = 0, /**< VLAN filtering disabled */
                .jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
                .hw_strip_crc   = 0, /**< CRC stripping by hardware disabled */
        },
        .rx_adv_conf = {
                .rss_conf = {
                        .rss_key = NULL,
                        .rss_hf = ETH_RSS_IPV4,
                },
        },
        .txmode = {
        },
};

static struct rte_eth_rxconf rx_conf = {
        .rx_thresh = {
                .pthresh = APP_DEFAULT_NIC_RX_PTHRESH,
                .hthresh = APP_DEFAULT_NIC_RX_HTHRESH,
                .wthresh = APP_DEFAULT_NIC_RX_WTHRESH,
        },
        .rx_free_thresh = APP_DEFAULT_NIC_RX_FREE_THRESH,
};

static struct rte_eth_txconf tx_conf = {
        .tx_thresh = {
                .pthresh = APP_DEFAULT_NIC_TX_PTHRESH,
                .hthresh = APP_DEFAULT_NIC_TX_HTHRESH,
                .wthresh = APP_DEFAULT_NIC_TX_WTHRESH,
        },
        .tx_free_thresh = APP_DEFAULT_NIC_TX_FREE_THRESH,
        .tx_rs_thresh = APP_DEFAULT_NIC_TX_RS_THRESH,
};

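/*
 * Walk the lcore table and give each worker lcore a compact,
 * zero-based worker ID. These IDs index the per-worker TX ring
 * arrays set up later in app_init_rings_tx().
 */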
static void
app_assign_worker_ids(void)
{
        uint32_t lcore, worker_id;

        /* Assign ID for each worker */
        worker_id = 0;
        for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
                struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;

                if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
                        continue;
                }

                lp_worker->worker_id = worker_id;
                worker_id ++;
        }
}

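/*
 * Create one mbuf pool per CPU socket in use, then point every
 * enabled lcore at the pool of its local socket, so that packet
 * buffers are always allocated from socket-local memory.
 */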
static void
app_init_mbuf_pools(void)
{
        uint32_t socket, lcore;

        /* Init the buffer pools */
        for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
                char name[32];
                if (app_is_socket_used(socket) == 0) {
                        continue;
                }

                rte_snprintf(name, sizeof(name), "mbuf_pool_%u", socket);
                printf("Creating the mbuf pool for socket %u ...\n", socket);
                app.pools[socket] = rte_mempool_create(
                        name,
                        APP_DEFAULT_MEMPOOL_BUFFERS,
                        APP_DEFAULT_MBUF_SIZE,
                        APP_DEFAULT_MEMPOOL_CACHE_SIZE,
                        sizeof(struct rte_pktmbuf_pool_private),
                        rte_pktmbuf_pool_init, NULL,
                        rte_pktmbuf_init, NULL,
                        socket,
                        0);
                if (app.pools[socket] == NULL) {
                        rte_panic("Cannot create mbuf pool on socket %u\n", socket);
                }
        }

        for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
                if (app.lcore_params[lcore].type == e_APP_LCORE_DISABLED) {
                        continue;
                }

                socket = rte_lcore_to_socket_id(lcore);
                app.lcore_params[lcore].pool = app.pools[socket];
        }
}

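/*
 * Create one LPM table per CPU socket in use, populate each table
 * with the full set of configured routing rules, and hand every
 * worker lcore a pointer to the table on its local socket.
 */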
static void
app_init_lpm_tables(void)
{
        uint32_t socket, lcore;

        /* Init the LPM tables */
        for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
                char name[32];
                uint32_t rule;

                if (app_is_socket_used(socket) == 0) {
                        continue;
                }

                rte_snprintf(name, sizeof(name), "lpm_table_%u", socket);
                printf("Creating the LPM table for socket %u ...\n", socket);
                app.lpm_tables[socket] = rte_lpm_create(
                        name,
                        socket,
                        APP_MAX_LPM_RULES,
                        RTE_LPM_MEMZONE);
                if (app.lpm_tables[socket] == NULL) {
                        rte_panic("Unable to create LPM table on socket %u\n", socket);
                }

                for (rule = 0; rule < app.n_lpm_rules; rule ++) {
                        int ret;

                        ret = rte_lpm_add(app.lpm_tables[socket],
                                app.lpm_rules[rule].ip,
                                app.lpm_rules[rule].depth,
                                app.lpm_rules[rule].if_out);

                        if (ret < 0) {
                                rte_panic("Unable to add entry %u (%x/%u => %u) to the LPM table on socket %u (%d)\n",
                                        rule, app.lpm_rules[rule].ip,
                                        (uint32_t) app.lpm_rules[rule].depth,
                                        (uint32_t) app.lpm_rules[rule].if_out,
                                        socket,
                                        ret);
                        }
                }
        }

        for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
                if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
                        continue;
                }

                socket = rte_lcore_to_socket_id(lcore);
                app.lcore_params[lcore].worker.lpm_table = app.lpm_tables[socket];
        }
}

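/*
 * Create one single-producer/single-consumer ring for every
 * (I/O RX lcore, worker lcore) pair, allocated on the I/O lcore's
 * socket. I/O RX lcores enqueue received packets; each worker
 * dequeues from its own set of input rings. The checks at the end
 * verify that every expected ring was wired up.
 */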
static void
app_init_rings_rx(void)
{
        uint32_t lcore;

        /* Initialize the rings for the RX side */
        for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
                struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
                uint32_t socket_io, lcore_worker;

                if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
                    (lp_io->rx.n_nic_queues == 0)) {
                        continue;
                }

                socket_io = rte_lcore_to_socket_id(lcore);

                for (lcore_worker = 0; lcore_worker < APP_MAX_LCORES; lcore_worker ++) {
                        char name[32];
                        struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore_worker].worker;
                        struct rte_ring *ring = NULL;

                        if (app.lcore_params[lcore_worker].type != e_APP_LCORE_WORKER) {
                                continue;
                        }

                        printf("Creating ring to connect I/O lcore %u (socket %u) with worker lcore %u ...\n",
                                lcore,
                                socket_io,
                                lcore_worker);
                        rte_snprintf(name, sizeof(name), "app_ring_rx_s%u_io%u_w%u",
                                socket_io,
                                lcore,
                                lcore_worker);
                        ring = rte_ring_create(
                                name,
                                app.ring_rx_size,
                                socket_io,
                                RING_F_SP_ENQ | RING_F_SC_DEQ);
                        if (ring == NULL) {
                                rte_panic("Cannot create ring to connect I/O core %u with worker core %u\n",
                                        lcore,
                                        lcore_worker);
                        }

                        lp_io->rx.rings[lp_io->rx.n_rings] = ring;
                        lp_io->rx.n_rings ++;

                        lp_worker->rings_in[lp_worker->n_rings_in] = ring;
                        lp_worker->n_rings_in ++;
                }
        }

        for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
                struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;

                if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
                    (lp_io->rx.n_nic_queues == 0)) {
                        continue;
                }

                if (lp_io->rx.n_rings != app_get_lcores_worker()) {
                        rte_panic("Algorithmic error (I/O RX rings)\n");
                }
        }

        for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
                struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;

                if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
                        continue;
                }

                if (lp_worker->n_rings_in != app_get_lcores_io_rx()) {
                        rte_panic("Algorithmic error (worker input rings)\n");
                }
        }
}

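/*
 * Create one single-producer/single-consumer ring for every
 * (worker lcore, TX port) pair, allocated on the socket of the I/O
 * lcore that drives that port. Workers enqueue routed packets; the
 * I/O TX lcore drains the rings of all workers for its ports.
 */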
static void
app_init_rings_tx(void)
{
        uint32_t lcore;

        /* Initialize the rings for the TX side */
        for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
                struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
                uint32_t port;

                if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
                        continue;
                }

                for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
                        char name[32];
                        struct app_lcore_params_io *lp_io = NULL;
                        struct rte_ring *ring;
                        uint32_t socket_io, lcore_io;

                        if (app.nic_tx_port_mask[port] == 0) {
                                continue;
                        }

                        if (app_get_lcore_for_nic_tx((uint8_t) port, &lcore_io) < 0) {
                                rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
                                        port);
                        }

                        lp_io = &app.lcore_params[lcore_io].io;
                        socket_io = rte_lcore_to_socket_id(lcore_io);

                        printf("Creating ring to connect worker lcore %u with TX port %u (through I/O lcore %u) (socket %u) ...\n",
                                lcore, port, lcore_io, socket_io);
                        rte_snprintf(name, sizeof(name), "app_ring_tx_s%u_w%u_p%u", socket_io, lcore, port);
                        ring = rte_ring_create(
                                name,
                                app.ring_tx_size,
                                socket_io,
                                RING_F_SP_ENQ | RING_F_SC_DEQ);
                        if (ring == NULL) {
                                rte_panic("Cannot create ring to connect worker core %u with TX port %u\n",
                                        lcore,
                                        port);
                        }

                        lp_worker->rings_out[port] = ring;
                        lp_io->tx.rings[port][lp_worker->worker_id] = ring;
                }
        }

        for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
                struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
                uint32_t i;

                if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
                    (lp_io->tx.n_nic_ports == 0)) {
                        continue;
                }

                for (i = 0; i < lp_io->tx.n_nic_ports; i ++) {
                        uint32_t port, j;

                        port = lp_io->tx.nic_ports[i];
                        for (j = 0; j < app_get_lcores_worker(); j ++) {
                                if (lp_io->tx.rings[port][j] == NULL) {
                                        rte_panic("Algorithmic error (I/O TX rings)\n");
                                }
                        }
                }
        }
}

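/*
 * Bring up the NICs: initialize the poll-mode drivers, probe the
 * PCI bus, then configure, set up the RX/TX queues of, and start
 * every port enabled in the RX/TX masks. Each RX queue is fed from
 * the mbuf pool local to the lcore that will poll it.
 */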
static void
app_init_nics(void)
{
        uint32_t socket, lcore;
        uint8_t port, queue;
        int ret;

        /* Init driver */
        printf("Initializing the PMD driver ...\n");
#ifdef RTE_LIBRTE_IGB_PMD
        if (rte_igb_pmd_init() < 0) {
                rte_panic("Cannot init IGB PMD\n");
        }
#endif
#ifdef RTE_LIBRTE_IXGBE_PMD
        if (rte_ixgbe_pmd_init() < 0) {
                rte_panic("Cannot init IXGBE PMD\n");
        }
#endif
        if (rte_eal_pci_probe() < 0) {
                rte_panic("Cannot probe PCI\n");
        }

        /* Init NIC ports and queues, then start the ports */
        for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
                struct rte_eth_link link;
                struct rte_mempool *pool;
                uint32_t n_rx_queues, n_tx_queues;

                n_rx_queues = app_get_nic_rx_queues_per_port(port);
                n_tx_queues = app.nic_tx_port_mask[port];

                if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
                        continue;
                }

                /* Init port */
                printf("Initializing NIC port %u ...\n", (uint32_t) port);
                ret = rte_eth_dev_configure(
                        port,
                        (uint8_t) n_rx_queues,
                        (uint8_t) n_tx_queues,
                        &port_conf);
                if (ret < 0) {
                        rte_panic("Cannot init NIC port %u (%d)\n", (uint32_t) port, ret);
                }
                rte_eth_promiscuous_enable(port);

                /* Init RX queues */
                for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
                        if (app.nic_rx_queue_mask[port][queue] == 0) {
                                continue;
                        }

                        app_get_lcore_for_nic_rx(port, queue, &lcore);
                        socket = rte_lcore_to_socket_id(lcore);
                        pool = app.lcore_params[lcore].pool;

                        printf("Initializing NIC port %u RX queue %u ...\n",
                                (uint32_t) port,
                                (uint32_t) queue);
                        ret = rte_eth_rx_queue_setup(
                                port,
                                queue,
                                (uint16_t) app.nic_rx_ring_size,
                                socket,
                                &rx_conf,
                                pool);
                        if (ret < 0) {
                                rte_panic("Cannot init RX queue %u for port %u (%d)\n",
                                        (uint32_t) queue,
                                        (uint32_t) port,
                                        ret);
                        }
                }

                /* Init TX queues */
                if (app.nic_tx_port_mask[port] == 1) {
                        app_get_lcore_for_nic_tx(port, &lcore);
                        socket = rte_lcore_to_socket_id(lcore);
                        printf("Initializing NIC port %u TX queue 0 ...\n",
                                (uint32_t) port);
                        ret = rte_eth_tx_queue_setup(
                                port,
                                0,
                                (uint16_t) app.nic_tx_ring_size,
                                socket,
                                &tx_conf);
                        if (ret < 0) {
                                rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
                                        port,
                                        ret);
                        }
                }

                /* Start port */
                ret = rte_eth_dev_start(port);
                if (ret < 0) {
                        rte_panic("Cannot start port %d (%d)\n", port, ret);
                }

                /* Get link status */
                rte_eth_link_get(port, &link);
                if (link.link_status) {
                        printf("Port %u is UP (%u Mbps)\n",
                                (uint32_t) port,
                                (unsigned) link.link_speed);
                } else {
                        printf("Port %u is DOWN\n",
                                (uint32_t) port);
                }
        }
}

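/*
 * Top-level initialization. The order matters: worker IDs must be
 * assigned before the TX rings are created, since app_init_rings_tx()
 * indexes each I/O lcore's TX ring array by worker_id.
 */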
void
app_init(void)
{
        app_assign_worker_ids();
        app_init_mbuf_pools();
        app_init_lpm_tables();
        app_init_rings_rx();
        app_init_rings_tx();
        app_init_nics();

        printf("Initialization completed.\n");
}