/*-
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>

#define NA	APP_SWQ_INVALID

struct app_params app = {
	/* CPU cores: {core id, core type, swq_in[], swq_out[]} */
	.cores = {
	{0, APP_CORE_MASTER, {15, 16, 17, NA, NA, NA, NA, NA},
		{12, 13, 14, NA, NA, NA, NA, NA} },
	{0, APP_CORE_RX, {NA, NA, NA, NA, NA, NA, NA, 12},
		{ 0,  1,  2,  3, NA, NA, NA, 15} },
	{0, APP_CORE_FC, { 0,  1,  2,  3, NA, NA, NA, 13},
		{ 4,  5,  6,  7, NA, NA, NA, 16} },
	{0, APP_CORE_RT, { 4,  5,  6,  7, NA, NA, NA, 14},
		{ 8,  9, 10, 11, NA, NA, NA, 17} },
	{0, APP_CORE_TX, { 8,  9, 10, 11, NA, NA, NA, NA},
		{NA, NA, NA, NA, NA, NA, NA, NA} },
	},

	/* Ports */
	.n_ports = APP_MAX_PORTS,

	/* NIC port RX mode (rte_eth_rxmode fields) */
	.header_split = 0,	/* Header split disabled */
	.hw_ip_checksum = 1,	/* IP checksum offload enabled */
	.hw_vlan_filter = 0,	/* VLAN filtering disabled */
	.jumbo_frame = 1,	/* Jumbo frame support enabled */
	.max_rx_pkt_len = 9000,	/* Jumbo frame max packet length */
	.hw_strip_crc = 0,	/* CRC stripping by hardware disabled */

	/* RX hash functions (rte_eth_rss_conf) */
	.rss_hf = ETH_RSS_IP,

	/* TX mode (rte_eth_txmode) */
	.mq_mode = ETH_MQ_TX_NONE,

	/* RX queue configuration (rte_eth_rxconf) */
	.rx_free_thresh = 64,

	/* Packet buffer pool */
	.pool_buffer_size = 2048 + RTE_PKTMBUF_HEADROOM,
	.pool_size = 32 * 1024,
	.pool_cache_size = 256,

	/* Message buffer pool */
	.msg_pool_buffer_size = 256,
	.msg_pool_size = 1024,
	.msg_pool_cache_size = 64,

	/* Rule table sizes */
	.max_arp_rules = 1 << 10,
	.max_firewall_rules = 1 << 5,
	.max_routing_rules = 1 << 24,
	.max_flow_rules = 1 << 24,

	/* Application processing */
	.ether_hdr_pop_push = 0,
};
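
/*
 * Look up the entry of the static core table above that describes the
 * given core. Returns NULL when the core id is not part of the table.
 */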
struct app_core_params *
app_get_core_params(uint32_t core_id)
{
	uint32_t i;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_id != core_id)
			continue;

		return p;
	}

	return NULL;
}
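
/*
 * The SW queue id space is derived from the core table: the number of
 * queues in use is one more than the highest queue id that any active
 * core references as an input.
 */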
uint32_t
app_get_n_swq_in(void)
{
	uint32_t max_swq_id = 0, i, j;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++) {
			uint32_t swq_id = p->swq_in[j];

			if ((swq_id != APP_SWQ_INVALID) &&
				(swq_id > max_swq_id))
				max_swq_id = swq_id;
		}
	}

	return (1 + max_swq_id);
}
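
/* Same scan as above, over the output queue ids. */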
uint32_t
app_get_n_swq_out(void)
{
	uint32_t max_swq_id = 0, i, j;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++) {
			uint32_t swq_id = p->swq_out[j];

			if ((swq_id != APP_SWQ_INVALID) &&
				(swq_id > max_swq_id))
				max_swq_id = swq_id;
		}
	}

	return (1 + max_swq_id);
}
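
/*
 * Count how many cores read from a given SW queue id. Together with the
 * writer count below, this is used to verify that every queue has exactly
 * one reader and one writer.
 */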
uint32_t
app_get_swq_in_count(uint32_t swq_id)
{
	uint32_t n, i, j;

	for (n = 0, i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++)
			if (p->swq_in[j] == swq_id)
				n++;
	}

	return n;
}
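
/* Writer-side counterpart: count how many cores write to a SW queue id. */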
uint32_t
app_get_swq_out_count(uint32_t swq_id)
{
	uint32_t n, i, j;

	for (n = 0, i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++)
			if (p->swq_out[j] == swq_id)
				n++;
	}

	return n;
}
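
/*
 * Sanity checks on the core table: the SW queue id space must be
 * contiguous, every SW queue must have exactly one reader and one writer,
 * and every flow classification, firewall and routing core must have
 * valid request and response queue ids.
 */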
void
app_check_core_params(void)
{
	uint32_t n_swq_in = app_get_n_swq_in();
	uint32_t n_swq_out = app_get_n_swq_out();
	uint32_t i;

	/* Check that the range of SW queues is contiguous and each SW queue
	has exactly one reader and one writer */
	if (n_swq_in != n_swq_out)
		rte_panic("Number of input SW queues is not equal to the "
			"number of output SW queues\n");

	for (i = 0; i < n_swq_in; i++) {
		uint32_t n = app_get_swq_in_count(i);

		if (n == 0)
			rte_panic("SW queue %u has no reader\n", i);
		if (n > 1)
			rte_panic("SW queue %u has more than one reader\n", i);
	}

	for (i = 0; i < n_swq_out; i++) {
		uint32_t n = app_get_swq_out_count(i);

		if (n == 0)
			rte_panic("SW queue %u has no writer\n", i);
		if (n > 1)
			rte_panic("SW queue %u has more than one writer\n", i);
	}

	/* Check that the request and response queues are valid */
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		uint32_t ring_id_req, ring_id_resp;

		if ((p->core_type != APP_CORE_FC) &&
			(p->core_type != APP_CORE_FW) &&
			(p->core_type != APP_CORE_RT)) {
			continue;
		}

		ring_id_req = p->swq_in[APP_SWQ_IN_REQ];
		if (ring_id_req == APP_SWQ_INVALID)
			rte_panic("Core %u of type %u has invalid request "
				"queue ID\n", p->core_id, p->core_type);

		ring_id_resp = p->swq_out[APP_SWQ_OUT_RESP];
		if (ring_id_resp == APP_SWQ_INVALID)
			rte_panic("Core %u of type %u has invalid response "
				"queue ID\n", p->core_id, p->core_type);
	}
}
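
/*
 * Return the core id of the first core of the requested type, or
 * RTE_MAX_LCORE when no such core is configured.
 */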
uint32_t
app_get_first_core_id(enum app_core_type core_type)
{
	uint32_t i;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == core_type)
			return p->core_id;
	}

	return RTE_MAX_LCORE;
}
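
/*
 * Request/response ring accessors: the request ring of a core is its
 * APP_SWQ_IN_REQ input queue and the response ring is its
 * APP_SWQ_OUT_RESP output queue (used by the ping handshake below).
 */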
struct rte_ring *
app_get_ring_req(uint32_t core_id)
{
	struct app_core_params *p = app_get_core_params(core_id);
	uint32_t ring_req_id = p->swq_in[APP_SWQ_IN_REQ];

	return app.rings[ring_req_id];
}

struct rte_ring *
app_get_ring_resp(uint32_t core_id)
{
	struct app_core_params *p = app_get_core_params(core_id);
	uint32_t ring_resp_id = p->swq_out[APP_SWQ_OUT_RESP];

	return app.rings[ring_resp_id];
}
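
/*
 * Create the mempools used by the application: the packet mbuf pool, an
 * indirect mbuf pool whose private area holds struct app_pkt_metadata,
 * and a small control-mbuf pool for inter-core request/response messages.
 */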
static void
app_init_mbuf_pools(void)
{
	/* Init the buffer pool */
	RTE_LOG(INFO, USER1, "Creating the mbuf pool ...\n");
	app.pool = rte_pktmbuf_pool_create("mempool", app.pool_size,
		app.pool_cache_size, 0, app.pool_buffer_size, rte_socket_id());
	if (app.pool == NULL)
		rte_panic("Cannot create mbuf pool\n");

	/* Init the indirect buffer pool */
	RTE_LOG(INFO, USER1, "Creating the indirect mbuf pool ...\n");
	app.indirect_pool = rte_pktmbuf_pool_create("indirect mempool",
		app.pool_size, app.pool_cache_size,
		sizeof(struct app_pkt_metadata), 0, rte_socket_id());
	if (app.indirect_pool == NULL)
		rte_panic("Cannot create indirect mbuf pool\n");

	/* Init the message buffer pool */
	RTE_LOG(INFO, USER1, "Creating the message pool ...\n");
	app.msg_pool = rte_mempool_create(
		"msg_pool",		/* pool name: assumed for this sketch */
		app.msg_pool_size,
		app.msg_pool_buffer_size,
		app.msg_pool_cache_size,
		0,			/* private data size: assumed 0 */
		NULL, NULL,		/* pool constructor: assumed none */
		rte_ctrlmbuf_init, NULL,
		rte_socket_id(),
		0);			/* flags: assumed none */
	if (app.msg_pool == NULL)
		rte_panic("Cannot create message pool\n");
}
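
/*
 * Create the SW rings (one per SW queue id) that connect the cores and
 * store their pointers in app.rings. Each ring is single producer, single
 * consumer, which the one-reader/one-writer check above guarantees is
 * safe. The routine name and ring size below are assumptions.
 */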
static void
app_init_rings(void)
{
	uint32_t n_swq, i;

	n_swq = app_get_n_swq_in();
	RTE_LOG(INFO, USER1, "Initializing %u SW rings ...\n", n_swq);

	app.rings = rte_malloc_socket(NULL, n_swq * sizeof(struct rte_ring *),
		RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (app.rings == NULL)
		rte_panic("Cannot allocate memory to store ring pointers\n");

	for (i = 0; i < n_swq; i++) {
		struct rte_ring *ring;
		char name[RTE_RING_NAMESIZE];

		snprintf(name, sizeof(name), "app_ring_%u", i);

		ring = rte_ring_create(
			name,
			1 << 10,	/* ring size: illustrative value */
			rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (ring == NULL)
			rte_panic("Cannot create ring %u\n", i);

		app.rings[i] = ring;
	}
}
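
/*
 * Poll the link status of every port once and panic if any port reports
 * link down. Link speed is logged in Gbps.
 */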
static void
app_ports_check_link(void)
{
	uint32_t all_ports_up, i;

	all_ports_up = 1;

	for (i = 0; i < app.n_ports; i++) {
		struct rte_eth_link link;
		uint8_t port = (uint8_t) i; /* assumption: ports used in index order */

		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(port, &link);
		RTE_LOG(INFO, USER1, "Port %u (%u Gbps) %s\n",
			port,
			link.link_speed / 1000,
			link.link_status ? "UP" : "DOWN");

		if (link.link_status == 0)
			all_ports_up = 0;
	}

	if (all_ports_up == 0)
		rte_panic("Some NIC ports are DOWN\n");
}
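
/*
 * A minimal sketch of the NIC port bring-up (routine name assumed):
 * configure each port, enable promiscuous mode, set up its RX and TX
 * queues, start it, then verify link status. Queue counts, descriptor
 * counts and the app.port_conf/app.rx_conf member names are assumptions.
 */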
static void
app_init_ports(void)
{
	uint32_t i;

	/* Init NIC ports, then start the ports */
	for (i = 0; i < app.n_ports; i++) {
		uint8_t port = (uint8_t) i; /* assumption: ports used in index order */
		int ret;

		/* Init port: one RX + one TX queue (assumed), device config
		from above (member name assumed) */
		RTE_LOG(INFO, USER1, "Initializing NIC port %u ...\n", port);
		ret = rte_eth_dev_configure(port, 1, 1, &app.port_conf);
		if (ret < 0)
			rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
		rte_eth_promiscuous_enable(port);

		/* Init RX queue 0 (128 descriptors: illustrative) */
		ret = rte_eth_rx_queue_setup(port, 0, 128,
			rte_eth_dev_socket_id(port),
			&app.rx_conf,	/* RX queue config above: member name assumed */
			app.pool);
		if (ret < 0)
			rte_panic("Cannot init RX for port %u (%d)\n",
				(uint32_t) port, ret);

		/* Init TX queue 0 (512 descriptors: illustrative, default conf) */
		ret = rte_eth_tx_queue_setup(port, 0, 512,
			rte_eth_dev_socket_id(port), NULL);
		if (ret < 0)
			rte_panic("Cannot init TX for port %u (%d)\n", port,
				ret);

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0)
			rte_panic("Cannot start port %u (%d)\n", port, ret);
	}

	app_ports_check_link();
}
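
/*
 * Send a PING request to every core that owns a request/response queue
 * pair (RX, flow classification, firewall, routing) and wait for its
 * reply, panicking after a TSC-based timeout. The routine name below is
 * assumed.
 */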
#define APP_PING_TIMEOUT_SEC	5

void
app_ping(void)
{
	uint32_t i;
	uint64_t timestamp, diff_tsc;

	const uint64_t timeout = rte_get_tsc_hz() * APP_PING_TIMEOUT_SEC;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		struct rte_ring *ring_req, *ring_resp;
		void *msg;
		struct app_msg_req *req;
		int status;

		if ((p->core_type != APP_CORE_FC) &&
			(p->core_type != APP_CORE_FW) &&
			(p->core_type != APP_CORE_RT) &&
			(p->core_type != APP_CORE_RX))
			continue;

		ring_req = app_get_ring_req(p->core_id);
		ring_resp = app_get_ring_resp(p->core_id);

		/* Fill request message */
		msg = (void *)rte_ctrlmbuf_alloc(app.msg_pool);
		if (msg == NULL)
			rte_panic("Unable to allocate new message\n");

		req = (struct app_msg_req *)
			rte_ctrlmbuf_data((struct rte_mbuf *)msg);
		req->type = APP_MSG_REQ_PING;

		/* Send request */
		do {
			status = rte_ring_sp_enqueue(ring_req, msg);
		} while (status == -ENOBUFS);

		/* Wait for response */
		timestamp = rte_rdtsc();
		do {
			status = rte_ring_sc_dequeue(ring_resp, &msg);
			diff_tsc = rte_rdtsc() - timestamp;

			if (unlikely(diff_tsc > timeout))
				rte_panic("Core %u of type %d does not respond "
					"to requests\n", p->core_id,
					p->core_type);
		} while (status != 0);

		/* Free message buffer */
		rte_ctrlmbuf_free(msg);
	}
}
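
/*
 * Small helper (name assumed here): enable the Ethernet header pop/push
 * behaviour whenever an IPv4 fragmentation or reassembly core is
 * configured.
 */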
static void
app_init_etc(void)
{
	if ((app_get_first_core_id(APP_CORE_IPV4_FRAG) != RTE_MAX_LCORE) ||
		(app_get_first_core_id(APP_CORE_IPV4_RAS) != RTE_MAX_LCORE)) {
		RTE_LOG(INFO, USER1,
			"Activating the Ethernet header pop/push ...\n");
		app.ether_hdr_pop_push = 1;
	}
}
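
/*
 * Top-level initialization entry point (name and exact call sequence
 * partly assumed): sanity-check the packet meta-data size and the core
 * table, create the pools and rings, bring up the ports, then log
 * completion.
 */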
void
app_init(void)
{
	if ((sizeof(struct app_pkt_metadata) % RTE_CACHE_LINE_SIZE) != 0)
		rte_panic("Application pkt meta-data size mismatch\n");

	app_check_core_params();

	app_init_mbuf_pools();
	app_init_rings();
	app_init_ports();
	app_init_etc();

	RTE_LOG(INFO, USER1, "Initialization completed\n");
}