/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "main.h"

#define NA APP_SWQ_INVALID
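
/*
 * Static mapping of pipeline stages to lcores and SW queues. Reading the
 * table below: packets flow RX -> FC (flow classification) -> RT (routing)
 * -> TX over SW queues 0-11, queues 12-14 carry requests from the master
 * core to the RX/FC/RT workers, and queues 15-17 carry the responses back.
 * NA marks an unused queue slot.
 */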
struct app_params app = {
	/* CPU cores */
	.cores = {
		{0, APP_CORE_MASTER, {15, 16, 17, NA, NA, NA, NA, NA},
			{12, 13, 14, NA, NA, NA, NA, NA} },
		{0, APP_CORE_RX, {NA, NA, NA, NA, NA, NA, NA, 12},
			{ 0,  1,  2,  3, NA, NA, NA, 15} },
		{0, APP_CORE_FC, { 0,  1,  2,  3, NA, NA, NA, 13},
			{ 4,  5,  6,  7, NA, NA, NA, 16} },
		{0, APP_CORE_RT, { 4,  5,  6,  7, NA, NA, NA, 14},
			{ 8,  9, 10, 11, NA, NA, NA, 17} },
		{0, APP_CORE_TX, { 8,  9, 10, 11, NA, NA, NA, NA},
			{NA, NA, NA, NA, NA, NA, NA, NA} },
	},

	/* NIC ports */
	.n_ports = APP_MAX_PORTS,
	.rsz_hwq_rx = 128, /* NIC RX ring size (descriptors) */
	.rsz_hwq_tx = 512, /* NIC TX ring size (descriptors) */

	.port_conf = {
		.rxmode = {
			.header_split = 0,      /* Header split disabled */
			.hw_ip_checksum = 1,    /* IP checksum offload enabled */
			.hw_vlan_filter = 0,    /* VLAN filtering disabled */
			.jumbo_frame = 1,       /* Jumbo frame support enabled */
			.max_rx_pkt_len = 9000, /* Jumbo frame max MAC packet length */
			.hw_strip_crc = 0,      /* CRC stripping by hardware disabled */
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
			},
		},
		.txmode = {
			.mq_mode = ETH_MQ_TX_NONE,
		},
	},

	.rx_conf = {
		.rx_free_thresh = 64,
	},

	/* SW queue (inter-core ring) size */
	.rsz_swq = 128,

	/* Packet buffer pool */
	.pool_buffer_size = 2048 + sizeof(struct rte_mbuf) +
		RTE_PKTMBUF_HEADROOM,
	.pool_size = 32 * 1024,
	.pool_cache_size = 256,

	/* Message buffer pool */
	.msg_pool_buffer_size = 256,
	.msg_pool_size = 1024,
	.msg_pool_cache_size = 64,

	/* Rule tables */
	.max_arp_rules = 1 << 10,
	.max_firewall_rules = 1 << 5,
	.max_routing_rules = 1 << 24,
	.max_flow_rules = 1 << 24,

	/* Application processing */
	.ether_hdr_pop_push = 0,
};
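
/*
 * Lookup helpers over the global core table. These scan all RTE_MAX_LCORE
 * slots, so they are intended for the init/control path, not the datapath.
 */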
struct app_core_params *
app_get_core_params(uint32_t core_id)
{
	uint32_t i;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_id != core_id)
			continue;

		return p;
	}

	return NULL;
}

static uint32_t
app_get_n_swq_in(void)
{
	uint32_t max_swq_id = 0, i, j;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++) {
			uint32_t swq_id = p->swq_in[j];

			if ((swq_id != APP_SWQ_INVALID) &&
				(swq_id > max_swq_id))
				max_swq_id = swq_id;
		}
	}

	return (1 + max_swq_id);
}

static uint32_t
app_get_n_swq_out(void)
{
	uint32_t max_swq_id = 0, i, j;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++) {
			uint32_t swq_id = p->swq_out[j];

			if ((swq_id != APP_SWQ_INVALID) &&
				(swq_id > max_swq_id))
				max_swq_id = swq_id;
		}
	}

	return (1 + max_swq_id);
}
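
/*
 * Count how many cores read (swq_in) or write (swq_out) a given SW queue;
 * app_check_core_params() requires exactly one reader and one writer per
 * queue.
 */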
static uint32_t
app_get_swq_in_count(uint32_t swq_id)
{
	uint32_t n, i;

	for (n = 0, i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		uint32_t j;

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++)
			if (p->swq_in[j] == swq_id)
				n++;
	}

	return n;
}

static uint32_t
app_get_swq_out_count(uint32_t swq_id)
{
	uint32_t n, i;

	for (n = 0, i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		uint32_t j;

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++)
			if (p->swq_out[j] == swq_id)
				n++;
	}

	return n;
}
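
/*
 * Validate the core table: the SW queue ID range must be contiguous, each
 * queue must have exactly one reader and one writer, and every core that
 * services messages must have valid request/response queues.
 */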
void
app_check_core_params(void)
{
	uint32_t n_swq_in = app_get_n_swq_in();
	uint32_t n_swq_out = app_get_n_swq_out();
	uint32_t i;

	/* Check that the range of SW queue IDs is contiguous and that each
	   SW queue has exactly one reader and one writer */
	if (n_swq_in != n_swq_out)
		rte_panic("Number of input SW queues is not equal to the "
			"number of output SW queues\n");

	for (i = 0; i < n_swq_in; i++) {
		uint32_t n = app_get_swq_in_count(i);

		if (n == 0)
			rte_panic("SW queue %u has no reader\n", i);

		if (n > 1)
			rte_panic("SW queue %u has more than one reader\n", i);
	}

	for (i = 0; i < n_swq_out; i++) {
		uint32_t n = app_get_swq_out_count(i);

		if (n == 0)
			rte_panic("SW queue %u has no writer\n", i);

		if (n > 1)
			rte_panic("SW queue %u has more than one writer\n", i);
	}

	/* Check that the request and response queues are valid */
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		uint32_t ring_id_req, ring_id_resp;

		if ((p->core_type != APP_CORE_FC) &&
			(p->core_type != APP_CORE_FW) &&
			(p->core_type != APP_CORE_RT))
			continue;

		ring_id_req = p->swq_in[APP_SWQ_IN_REQ];
		if (ring_id_req == APP_SWQ_INVALID)
			rte_panic("Core %u of type %u has invalid request "
				"queue ID\n", p->core_id, p->core_type);

		ring_id_resp = p->swq_out[APP_SWQ_OUT_RESP];
		if (ring_id_resp == APP_SWQ_INVALID)
			rte_panic("Core %u of type %u has invalid response "
				"queue ID\n", p->core_id, p->core_type);
	}
}

uint32_t
app_get_first_core_id(enum app_core_type core_type)
{
	uint32_t i;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == core_type)
			return p->core_id;
	}

	return RTE_MAX_LCORE;
}
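
/*
 * Request/response ring accessors for the message path between the master
 * core and a worker core. Both assume core_id is present in the core table;
 * app_get_core_params() returning NULL would fault here.
 */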
struct rte_ring *
app_get_ring_req(uint32_t core_id)
{
	struct app_core_params *p = app_get_core_params(core_id);
	uint32_t ring_req_id = p->swq_in[APP_SWQ_IN_REQ];

	return app.rings[ring_req_id];
}

struct rte_ring *
app_get_ring_resp(uint32_t core_id)
{
	struct app_core_params *p = app_get_core_params(core_id);
	uint32_t ring_resp_id = p->swq_out[APP_SWQ_OUT_RESP];

	return app.rings[ring_resp_id];
}
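
/*
 * Create the three mempools used by the application: direct packet mbufs,
 * indirect mbufs (sized for the mbuf header plus per-packet metadata, with
 * no data room of their own), and small control mbufs for the inter-core
 * request/response messages.
 */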
static void
app_init_mbuf_pools(void)
{
	/* Init the packet buffer pool */
	RTE_LOG(INFO, USER1, "Creating the mbuf pool ...\n");
	app.pool = rte_mempool_create(
		"mempool",
		app.pool_size,
		app.pool_buffer_size,
		app.pool_cache_size,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.pool == NULL)
		rte_panic("Cannot create mbuf pool\n");

	/* Init the indirect buffer pool */
	RTE_LOG(INFO, USER1, "Creating the indirect mbuf pool ...\n");
	app.indirect_pool = rte_mempool_create(
		"indirect mempool",
		app.pool_size,
		sizeof(struct rte_mbuf) + sizeof(struct app_pkt_metadata),
		app.pool_cache_size,
		0,
		NULL, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.indirect_pool == NULL)
		rte_panic("Cannot create indirect mbuf pool\n");

	/* Init the message buffer pool */
	RTE_LOG(INFO, USER1, "Creating the message pool ...\n");
	app.msg_pool = rte_mempool_create(
		"mempool msg",
		app.msg_pool_size,
		app.msg_pool_buffer_size,
		app.msg_pool_cache_size,
		0,
		NULL, NULL,
		rte_ctrlmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.msg_pool == NULL)
		rte_panic("Cannot create message pool\n");
}
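
/*
 * Allocate the array of ring pointers and create one rte_ring per SW queue.
 * The number of rings is derived from the highest queue ID referenced in
 * the core table.
 */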
static void
app_init_rings(void)
{
	uint32_t n_swq, i;

	n_swq = app_get_n_swq_in();
	RTE_LOG(INFO, USER1, "Initializing %u SW rings ...\n", n_swq);

	app.rings = rte_malloc_socket(NULL, n_swq * sizeof(struct rte_ring *),
		CACHE_LINE_SIZE, rte_socket_id());
	if (app.rings == NULL)
		rte_panic("Cannot allocate memory to store ring pointers\n");

	for (i = 0; i < n_swq; i++) {
		struct rte_ring *ring;
		char name[32];

		snprintf(name, sizeof(name), "app_ring_%u", i);

		/* Single-producer/single-consumer flags are safe here:
		   app_check_core_params() guarantees exactly one reader
		   and one writer per SW queue */
		ring = rte_ring_create(
			name,
			app.rsz_swq,
			rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (ring == NULL)
			rte_panic("Cannot create ring %u\n", i);

		app.rings[i] = ring;
	}
}
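
/*
 * Query the link status of every NIC port and panic if any port is down;
 * the NIC reports link speed in Mbps, hence the division by 1000 to print
 * Gbps.
 */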
static void
app_ports_check_link(void)
{
	uint32_t all_ports_up, i;

	all_ports_up = 1;

	for (i = 0; i < app.n_ports; i++) {
		struct rte_eth_link link;
		uint32_t port;

		port = app.ports[i];
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(port, &link);
		RTE_LOG(INFO, USER1, "Port %u (%u Gbps) %s\n",
			port,
			link.link_speed / 1000,
			link.link_status ? "UP" : "DOWN");

		if (link.link_status == 0)
			all_ports_up = 0;
	}

	if (all_ports_up == 0)
		rte_panic("Some NIC ports are DOWN\n");
}
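
/*
 * Configure each NIC port with a single RX and a single TX queue, enable
 * promiscuous mode, start the port, and finally verify that all links
 * came up.
 */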
static void
app_init_ports(void)
{
	uint32_t i;

	/* Init NIC ports, then start the ports */
	for (i = 0; i < app.n_ports; i++) {
		uint32_t port;
		int ret;

		port = app.ports[i];
		RTE_LOG(INFO, USER1, "Initializing NIC port %u ...\n", port);

		/* Init port with one RX and one TX queue */
		ret = rte_eth_dev_configure(
			port,
			1,
			1,
			&app.port_conf);
		if (ret < 0)
			rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
		rte_eth_promiscuous_enable(port);

		/* Init RX queue */
		ret = rte_eth_rx_queue_setup(
			port,
			0,
			app.rsz_hwq_rx,
			rte_eth_dev_socket_id(port),
			&app.rx_conf,
			app.pool);
		if (ret < 0)
			rte_panic("Cannot init RX for port %u (%d)\n",
				port, ret);

		/* Init TX queue */
		ret = rte_eth_tx_queue_setup(
			port,
			0,
			app.rsz_hwq_tx,
			rte_eth_dev_socket_id(port),
			&app.tx_conf);
		if (ret < 0)
			rte_panic("Cannot init TX for port %u (%d)\n",
				port, ret);

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0)
			rte_panic("Cannot start port %u (%d)\n", port, ret);
	}

	app_ports_check_link();
}
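
/*
 * Sanity-check that every worker core services its request queue: send an
 * APP_MSG_REQ_PING to each RX/FC/FW/RT core and busy-wait up to
 * APP_PING_TIMEOUT_SEC seconds (measured in TSC cycles) for the response.
 */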
#define APP_PING_TIMEOUT_SEC 5

void
app_ping(void)
{
	uint32_t i;
	uint64_t timestamp, diff_tsc;
	const uint64_t timeout = rte_get_tsc_hz() * APP_PING_TIMEOUT_SEC;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		struct rte_ring *ring_req, *ring_resp;
		void *msg;
		struct app_msg_req *req;
		int status;

		/* Only the cores that service request queues are pinged */
		if ((p->core_type != APP_CORE_FC) &&
			(p->core_type != APP_CORE_FW) &&
			(p->core_type != APP_CORE_RT) &&
			(p->core_type != APP_CORE_RX))
			continue;

		ring_req = app_get_ring_req(p->core_id);
		ring_resp = app_get_ring_resp(p->core_id);

		/* Fill request message */
		msg = (void *)rte_ctrlmbuf_alloc(app.msg_pool);
		if (msg == NULL)
			rte_panic("Unable to allocate new message\n");

		req = (struct app_msg_req *)
			rte_ctrlmbuf_data((struct rte_mbuf *)msg);
		req->type = APP_MSG_REQ_PING;

		/* Send request (retry while the request ring is full) */
		do {
			status = rte_ring_sp_enqueue(ring_req, msg);
		} while (status == -ENOBUFS);

		/* Wait for response */
		timestamp = rte_rdtsc();
		do {
			status = rte_ring_sc_dequeue(ring_resp, &msg);
			diff_tsc = rte_rdtsc() - timestamp;

			if (unlikely(diff_tsc > timeout))
				rte_panic("Core %u of type %d does not respond "
					"to requests\n", p->core_id,
					p->core_type);
		} while (status != 0);

		/* Free message buffer */
		rte_ctrlmbuf_free(msg);
	}
}
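
/*
 * IPv4 fragmentation/reassembly cores operate on IP packets rather than
 * Ethernet frames, so the Ethernet header pop/push behavior is enabled
 * whenever such a core is configured.
 */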
static void
app_init_etc(void)
{
	if ((app_get_first_core_id(APP_CORE_IPV4_FRAG) != RTE_MAX_LCORE) ||
		(app_get_first_core_id(APP_CORE_IPV4_RAS) != RTE_MAX_LCORE)) {
		RTE_LOG(INFO, USER1,
			"Activating the Ethernet header pop/push ...\n");
		app.ether_hdr_pop_push = 1;
	}
}

void
app_init(void)
{
	if ((sizeof(struct app_pkt_metadata) % CACHE_LINE_SIZE) != 0)
		rte_panic("Application pkt meta-data size mismatch\n");

	app_check_core_params();

	app_init_mbuf_pools();
	app_init_rings();
	app_init_ports();
	app_init_etc();

	RTE_LOG(INFO, USER1, "Initialization completed\n");
}
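
/*
 * Expected call order (a sketch of how this example's main() typically
 * drives init, stated here as an assumption rather than a guarantee):
 * rte_eal_init() first, then argument parsing, then app_init() on the
 * master lcore, and only then the per-core packet processing loops are
 * launched on the remaining lcores.
 */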