1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
11 #include <sys/queue.h>
16 #include <rte_common.h>
17 #include <rte_byteorder.h>
19 #include <rte_memory.h>
20 #include <rte_memcpy.h>
22 #include <rte_launch.h>
23 #include <rte_atomic.h>
24 #include <rte_cycles.h>
25 #include <rte_prefetch.h>
26 #include <rte_lcore.h>
27 #include <rte_per_lcore.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_interrupts.h>
30 #include <rte_random.h>
31 #include <rte_debug.h>
32 #include <rte_ether.h>
33 #include <rte_ethdev.h>
35 #include <rte_mempool.h>
37 #include <rte_string_fns.h>
/*
 * Default Ethernet device configuration template, copied per-port and
 * adjusted in the NIC init code below (local_port_conf).
 * RX side: RSS multi-queue mode with checksum and CRC-strip offloads.
 * TX side: multi-queue disabled.
 * NOTE(review): this listing is a partial capture (the embedded original
 * line numbers skip), so the .rxmode/.txmode sub-struct braces and any
 * RSS key/hash fields are missing here — consult the full file.
 */
44 static struct rte_eth_conf port_conf = {
46 .mq_mode = ETH_MQ_RX_RSS,
48 .ignore_offload_bitfield = 1,
49 .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
50 DEV_RX_OFFLOAD_CRC_STRIP),
59 .mq_mode = ETH_MQ_TX_NONE,
/*
 * Walk all possible lcores and give each lcore configured as a worker a
 * sequential worker_id, used later to index the per-worker TX rings.
 * NOTE(review): partial capture — the return type, the initialization and
 * increment of worker_id, the loop's continue/closing braces are among the
 * elided lines; presumably worker_id starts at 0 and is incremented per
 * worker lcore — confirm against the full source.
 */
64 app_assign_worker_ids(void)
66 uint32_t lcore, worker_id;
68 /* Assign ID for each worker */
70 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
71 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
73 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
/* non-worker lcores are skipped (continue elided from capture) */
77 lp_worker->worker_id = worker_id;
/*
 * Create one packet mbuf pool per CPU socket that is actually in use,
 * then point every enabled lcore at the pool local to its own socket
 * (NUMA-aware buffer allocation). Panics on pool-creation failure, so
 * there is no error return to check.
 * NOTE(review): partial capture — the `name` buffer declaration, the
 * continue statements inside the guards, and closing braces are elided.
 */
83 app_init_mbuf_pools(void)
85 unsigned socket, lcore;
87 /* Init the buffer pools */
88 for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
/* skip sockets with no configured lcores/ports */
90 if (app_is_socket_used(socket) == 0) {
94 snprintf(name, sizeof(name), "mbuf_pool_%u", socket);
95 printf("Creating the mbuf pool for socket %u ...\n", socket);
96 app.pools[socket] = rte_pktmbuf_pool_create(
97 name, APP_DEFAULT_MEMPOOL_BUFFERS,
98 APP_DEFAULT_MEMPOOL_CACHE_SIZE,
99 0, APP_DEFAULT_MBUF_DATA_SIZE, socket);
100 if (app.pools[socket] == NULL) {
101 rte_panic("Cannot create mbuf pool on socket %u\n", socket);
/* Second pass: attach each active lcore to its socket-local pool. */
105 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
106 if (app.lcore_params[lcore].type == e_APP_LCORE_DISABLED) {
110 socket = rte_lcore_to_socket_id(lcore);
111 app.lcore_params[lcore].pool = app.pools[socket];
/*
 * Create one LPM (longest-prefix-match) routing table per used socket,
 * populate each with all configured app.lpm_rules (ip/depth -> if_out),
 * then hand every worker lcore a pointer to its socket-local table.
 * Panics on creation or rule-insertion failure.
 * NOTE(review): partial capture — the `name`/`rule`/`ret` declarations,
 * the rte_lpm_create() argument list, the `ret < 0` check before the
 * panic, and closing braces are elided lines.
 */
116 app_init_lpm_tables(void)
118 unsigned socket, lcore;
120 /* Init the LPM tables */
121 for (socket = 0; socket < APP_MAX_SOCKETS; socket ++) {
125 if (app_is_socket_used(socket) == 0) {
129 struct rte_lpm_config lpm_config;
131 lpm_config.max_rules = APP_MAX_LPM_RULES;
132 lpm_config.number_tbl8s = 256;
133 lpm_config.flags = 0;
134 snprintf(name, sizeof(name), "lpm_table_%u", socket);
135 printf("Creating the LPM table for socket %u ...\n", socket);
136 app.lpm_tables[socket] = rte_lpm_create(
140 if (app.lpm_tables[socket] == NULL) {
141 rte_panic("Unable to create LPM table on socket %u\n", socket);
/* Insert every configured route into this socket's table. */
144 for (rule = 0; rule < app.n_lpm_rules; rule ++) {
147 ret = rte_lpm_add(app.lpm_tables[socket],
148 app.lpm_rules[rule].ip,
149 app.lpm_rules[rule].depth,
150 app.lpm_rules[rule].if_out);
153 rte_panic("Unable to add entry %u (%x/%u => %u) to the LPM table on socket %u (%d)\n",
155 (unsigned) app.lpm_rules[rule].ip,
156 (unsigned) app.lpm_rules[rule].depth,
157 (unsigned) app.lpm_rules[rule].if_out,
/* Give each worker lcore the LPM table local to its socket. */
165 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
166 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
170 socket = rte_lcore_to_socket_id(lcore);
171 app.lcore_params[lcore].worker.lpm_table = app.lpm_tables[socket];
/*
 * Create the RX-side software rings: one single-producer/single-consumer
 * ring from every I/O RX lcore to every worker lcore, allocated on the
 * I/O lcore's socket. Afterwards, sanity-check that each I/O RX lcore
 * fans out to exactly app_get_lcores_worker() rings and each worker fans
 * in from exactly app_get_lcores_io_rx() rings.
 * NOTE(review): partial capture — the `lcore`/`name` declarations, the
 * rte_ring_create() name/size/socket arguments, the NULL check before the
 * panic, and closing braces are elided lines.
 */
176 app_init_rings_rx(void)
180 /* Initialize the rings for the RX side */
181 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
182 struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
183 unsigned socket_io, lcore_worker;
/* only I/O lcores that actually own RX NIC queues create rings */
185 if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
186 (lp_io->rx.n_nic_queues == 0)) {
190 socket_io = rte_lcore_to_socket_id(lcore);
192 for (lcore_worker = 0; lcore_worker < APP_MAX_LCORES; lcore_worker ++) {
194 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore_worker].worker;
195 struct rte_ring *ring = NULL;
197 if (app.lcore_params[lcore_worker].type != e_APP_LCORE_WORKER) {
201 printf("Creating ring to connect I/O lcore %u (socket %u) with worker lcore %u ...\n",
205 snprintf(name, sizeof(name), "app_ring_rx_s%u_io%u_w%u",
209 ring = rte_ring_create(
/* SP/SC: exactly one I/O producer and one worker consumer per ring */
213 RING_F_SP_ENQ | RING_F_SC_DEQ);
215 rte_panic("Cannot create ring to connect I/O core %u with worker core %u\n",
/* register the ring on both endpoints */
220 lp_io->rx.rings[lp_io->rx.n_rings] = ring;
221 lp_io->rx.n_rings ++;
223 lp_worker->rings_in[lp_worker->n_rings_in] = ring;
224 lp_worker->n_rings_in ++;
/* Consistency check: every I/O RX lcore must reach every worker. */
228 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
229 struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
231 if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
232 (lp_io->rx.n_nic_queues == 0)) {
236 if (lp_io->rx.n_rings != app_get_lcores_worker()) {
237 rte_panic("Algorithmic error (I/O RX rings)\n");
/* Consistency check: every worker must be fed by every I/O RX lcore. */
241 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
242 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
244 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
248 if (lp_worker->n_rings_in != app_get_lcores_io_rx()) {
249 rte_panic("Algorithmic error (worker input rings)\n");
/*
 * Create the TX-side software rings: for every worker lcore and every
 * enabled TX port, one SP/SC ring from the worker to the I/O lcore that
 * handles that port's TX, allocated on the I/O lcore's socket. Then
 * verify that every (port, worker) slot of each I/O TX lcore got a ring.
 * NOTE(review): partial capture — the `lcore`/`port`/`name` declarations,
 * the rte_ring_create() name/size/socket arguments, the NULL check before
 * the panic, and closing braces are elided lines.
 */
255 app_init_rings_tx(void)
259 /* Initialize the rings for the TX side */
260 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
261 struct app_lcore_params_worker *lp_worker = &app.lcore_params[lcore].worker;
264 if (app.lcore_params[lcore].type != e_APP_LCORE_WORKER) {
268 for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
270 struct app_lcore_params_io *lp_io = NULL;
271 struct rte_ring *ring;
272 uint32_t socket_io, lcore_io;
/* skip ports not enabled for TX */
274 if (app.nic_tx_port_mask[port] == 0) {
/* find the I/O lcore responsible for this port's TX */
278 if (app_get_lcore_for_nic_tx(port, &lcore_io) < 0) {
279 rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
283 lp_io = &app.lcore_params[lcore_io].io;
284 socket_io = rte_lcore_to_socket_id(lcore_io);
286 printf("Creating ring to connect worker lcore %u with TX port %u (through I/O lcore %u) (socket %u) ...\n",
287 lcore, port, (unsigned)lcore_io, (unsigned)socket_io);
288 snprintf(name, sizeof(name), "app_ring_tx_s%u_w%u_p%u", socket_io, lcore, port);
289 ring = rte_ring_create(
293 RING_F_SP_ENQ | RING_F_SC_DEQ);
295 rte_panic("Cannot create ring to connect worker core %u with TX port %u\n",
/* register the ring on both endpoints, indexed by worker_id on TX */
300 lp_worker->rings_out[port] = ring;
301 lp_io->tx.rings[port][lp_worker->worker_id] = ring;
/* Consistency check: all (port, worker) TX ring slots must be filled. */
305 for (lcore = 0; lcore < APP_MAX_LCORES; lcore ++) {
306 struct app_lcore_params_io *lp_io = &app.lcore_params[lcore].io;
309 if ((app.lcore_params[lcore].type != e_APP_LCORE_IO) ||
310 (lp_io->tx.n_nic_ports == 0)) {
314 for (i = 0; i < lp_io->tx.n_nic_ports; i ++){
317 port = lp_io->tx.nic_ports[i];
318 for (j = 0; j < app_get_lcores_worker(); j ++) {
319 if (lp_io->tx.rings[port][j] == NULL) {
320 rte_panic("Algorithmic error (I/O TX rings)\n");
327 /* Check the link status of all ports in up to 9s, and print them finally */
/*
 * Poll link status of every port selected by port_mask (and actually used
 * for RX or TX), sleeping CHECK_INTERVAL ms between rounds, for at most
 * MAX_CHECK_TIME rounds. On the final round (all up, or timeout) the
 * per-port status is printed.
 * NOTE(review): partial capture — the `portid` declaration, the
 * all_ports_up set/clear statements, the print_flag assignment, the
 * continue/break statements and closing braces are elided lines.
 */
329 check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
331 #define CHECK_INTERVAL 100 /* 100ms */
332 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
334 uint8_t count, all_ports_up, print_flag = 0;
335 struct rte_eth_link link;
336 uint32_t n_rx_queues, n_tx_queues;
338 printf("\nChecking link status");
340 for (count = 0; count <= MAX_CHECK_TIME; count++) {
342 for (portid = 0; portid < port_num; portid++) {
/* skip ports outside the mask or with no configured queues */
343 if ((port_mask & (1 << portid)) == 0)
345 n_rx_queues = app_get_nic_rx_queues_per_port(portid);
346 n_tx_queues = app.nic_tx_port_mask[portid];
347 if ((n_rx_queues == 0) && (n_tx_queues == 0))
349 memset(&link, 0, sizeof(link));
/* non-blocking query — may report "down" while autoneg is in progress */
350 rte_eth_link_get_nowait(portid, &link);
351 /* print link status if flag set */
352 if (print_flag == 1) {
353 if (link.link_status)
355 "Port%d Link Up - speed %uMbps - %s\n",
356 portid, link.link_speed,
357 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
358 ("full-duplex") : ("half-duplex\n"));
360 printf("Port %d Link Down\n", portid);
363 /* clear all_ports_up flag if any link down */
364 if (link.link_status == ETH_LINK_DOWN) {
369 /* after finally printing all link status, get out */
373 if (all_ports_up == 0) {
376 rte_delay_ms(CHECK_INTERVAL);
379 /* set the print_flag if all ports up or timeout */
380 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
/*
 * NOTE(review): the function signature line is missing from this capture;
 * from the body this is presumably app_init_nics() — confirm in full file.
 * For every NIC port with configured RX queues or TX enabled: configure
 * the device (per-port copy of port_conf, fast-free TX offload if the
 * device supports it), enable promiscuous mode, adjust descriptor ring
 * sizes, set up each enabled RX queue on the socket of the lcore that
 * polls it, set up TX queue 0 where TX is enabled, start the port, and
 * finally wait for link-up on all ports.
 * NOTE(review): partial capture — the declarations of `port`, `queue`,
 * `lcore`, `socket`, `ret`, the `name`/continue/brace lines and several
 * `ret < 0` checks before the panics are elided lines.
 */
395 uint32_t n_rx_queues, n_tx_queues;
397 /* Init NIC ports and queues, then start the ports */
398 for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
399 struct rte_mempool *pool;
400 uint16_t nic_rx_ring_size;
401 uint16_t nic_tx_ring_size;
402 struct rte_eth_rxconf rxq_conf;
403 struct rte_eth_txconf txq_conf;
404 struct rte_eth_dev_info dev_info;
405 struct rte_eth_conf local_port_conf = port_conf;
407 n_rx_queues = app_get_nic_rx_queues_per_port(port);
408 n_tx_queues = app.nic_tx_port_mask[port];
/* skip ports that are unused for both RX and TX */
410 if ((n_rx_queues == 0) && (n_tx_queues == 0)) {
415 printf("Initializing NIC port %u ...\n", port);
416 rte_eth_dev_info_get(port, &dev_info);
/* opt into fast mbuf free only where the PMD advertises it */
417 if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
418 local_port_conf.txmode.offloads |=
419 DEV_TX_OFFLOAD_MBUF_FAST_FREE;
420 ret = rte_eth_dev_configure(
422 (uint8_t) n_rx_queues,
423 (uint8_t) n_tx_queues,
426 rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
428 rte_eth_promiscuous_enable(port);
/* let the PMD clamp descriptor counts to its supported range */
430 nic_rx_ring_size = app.nic_rx_ring_size;
431 nic_tx_ring_size = app.nic_tx_ring_size;
432 ret = rte_eth_dev_adjust_nb_rx_tx_desc(
433 port, &nic_rx_ring_size, &nic_tx_ring_size);
435 rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
438 app.nic_rx_ring_size = nic_rx_ring_size;
439 app.nic_tx_ring_size = nic_tx_ring_size;
441 rxq_conf = dev_info.default_rxconf;
442 rxq_conf.offloads = local_port_conf.rxmode.offloads;
/* RX queues: allocate each from the mempool local to its polling lcore */
444 for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) {
445 if (app.nic_rx_queue_mask[port][queue] == 0) {
449 app_get_lcore_for_nic_rx(port, queue, &lcore);
450 socket = rte_lcore_to_socket_id(lcore);
451 pool = app.lcore_params[lcore].pool;
453 printf("Initializing NIC port %u RX queue %u ...\n",
455 ret = rte_eth_rx_queue_setup(
458 (uint16_t) app.nic_rx_ring_size,
463 rte_panic("Cannot init RX queue %u for port %u (%d)\n",
468 txq_conf = dev_info.default_txconf;
469 txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
470 txq_conf.offloads = local_port_conf.txmode.offloads;
/* TX: single queue 0, set up on the socket of its I/O TX lcore */
472 if (app.nic_tx_port_mask[port] == 1) {
473 app_get_lcore_for_nic_tx(port, &lcore);
474 socket = rte_lcore_to_socket_id(lcore);
475 printf("Initializing NIC port %u TX queue 0 ...\n",
477 ret = rte_eth_tx_queue_setup(
480 (uint16_t) app.nic_tx_ring_size,
484 rte_panic("Cannot init TX queue 0 for port %d (%d)\n",
491 ret = rte_eth_dev_start(port);
493 rte_panic("Cannot start port %d (%d)\n", port, ret);
/* block until all started ports report link up (or 9s timeout) */
497 check_all_ports_link_status(APP_MAX_NIC_PORTS, (~0x0));
/*
 * NOTE(review): the enclosing function's signature is missing from this
 * capture; presumably the top-level app_init() entry point — confirm in
 * full file. Visible tail runs the init stages in dependency order:
 * worker IDs, then mempools, then LPM tables (the ring/NIC init calls
 * are among the elided lines).
 */
503 app_assign_worker_ids();
504 app_init_mbuf_pools();
505 app_init_lpm_tables();
510 printf("Initialization completed.\n");