/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>

#include "main.h"
#define NA	APP_SWQ_INVALID
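
/*
 * Static application configuration.
 *
 * Each cores[] entry below is {core_id, core_type, swq_in[], swq_out[]}:
 * the lcore ID (left as 0 here and assigned at run time), the pipeline
 * role of the core, and the IDs of the software queues (SWQs) it reads
 * and writes, with NA marking unused slots. The packet path chains
 * RX -> FC -> RT -> TX over SWQs 0-11; the master core issues requests
 * over SWQs 12-14 and collects responses over SWQs 15-17.
 */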
struct app_params app = {
	/* CPU cores */
	.cores = {
	{0, APP_CORE_MASTER, {15, 16, 17, NA, NA, NA, NA, NA},
		{12, 13, 14, NA, NA, NA, NA, NA} },
	{0, APP_CORE_RX, {NA, NA, NA, NA, NA, NA, NA, 12},
		{ 0,  1,  2,  3, NA, NA, NA, 15} },
	{0, APP_CORE_FC, { 0,  1,  2,  3, NA, NA, NA, 13},
		{ 4,  5,  6,  7, NA, NA, NA, 16} },
	{0, APP_CORE_RT, { 4,  5,  6,  7, NA, NA, NA, 14},
		{ 8,  9, 10, 11, NA, NA, NA, 17} },
	{0, APP_CORE_TX, { 8,  9, 10, 11, NA, NA, NA, NA},
		{NA, NA, NA, NA, NA, NA, NA, NA} },
	},
	/* Ports */
	.n_ports = APP_MAX_PORTS,
	.rsz_hwq_rx = 128,
	.rsz_hwq_tx = 512,

	/* SW queues (SWQs) */
	.rsz_swq = 1 << 13,

	.port_conf = {
		.rxmode = {
			.split_hdr_size = 0,
			.header_split = 0,   /* Header Split disabled */
			.hw_ip_checksum = 1, /* IP checksum offload enabled */
			.hw_vlan_filter = 0, /* VLAN filtering disabled */
			.jumbo_frame = 1,    /* Jumbo frame support enabled */
			.max_rx_pkt_len = 9000, /* Jumbo frame max MAC pkt length */
			.hw_strip_crc = 0,   /* CRC stripping by hardware disabled */
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_IP,
			},
		},
		.txmode = {
			.mq_mode = ETH_MQ_TX_NONE,
		},
	},
	/* NIC RX queue configuration */
	.rx_conf = {
		.rx_free_thresh = 64,
	},

	/* NIC TX queue configuration */
	.tx_conf = {
		.tx_free_thresh = 0, /* 0 = use PMD default */
		.tx_rs_thresh = 0,   /* 0 = use PMD default */
	},
	/* Buffer pool */
	.pool_buffer_size = 2048 + sizeof(struct rte_mbuf) +
		RTE_PKTMBUF_HEADROOM,
	.pool_size = 32 * 1024,
	.pool_cache_size = 256,
	/* Message buffer pool */
	.msg_pool_buffer_size = 256,
	.msg_pool_size = 1024,
	.msg_pool_cache_size = 64,
	/* Rule tables */
	.max_arp_rules = 1 << 10,
	.max_firewall_rules = 1 << 5,
	.max_routing_rules = 1 << 24,
	.max_flow_rules = 1 << 24,
	/* Application processing */
	.ether_hdr_pop_push = 0,
};
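
/*
 * Look up the per-core parameters for a given lcore ID. Returns NULL
 * when no configured core matches.
 */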
struct app_core_params *
app_get_core_params(uint32_t core_id)
{
	uint32_t i;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_id != core_id)
			continue;

		return p;
	}

	return NULL;
}
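
/*
 * The SWQ count is derived from the configuration table: it is one more
 * than the highest queue ID referenced by any core, computed separately
 * over the input and output sides so the two can be cross-checked.
 */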
static uint32_t
app_get_n_swq_in(void)
{
	uint32_t max_swq_id = 0, i, j;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++) {
			uint32_t swq_id = p->swq_in[j];

			if ((swq_id != APP_SWQ_INVALID) &&
				(swq_id > max_swq_id))
				max_swq_id = swq_id;
		}
	}

	return 1 + max_swq_id;
}
static uint32_t
app_get_n_swq_out(void)
{
	uint32_t max_swq_id = 0, i, j;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++) {
			uint32_t swq_id = p->swq_out[j];

			if ((swq_id != APP_SWQ_INVALID) &&
				(swq_id > max_swq_id))
				max_swq_id = swq_id;
		}
	}

	return 1 + max_swq_id;
}
static uint32_t
app_get_swq_in_count(uint32_t swq_id)
{
	uint32_t n, i, j;

	for (n = 0, i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++)
			if (p->swq_in[j] == swq_id)
				n++;
	}

	return n;
}
static uint32_t
app_get_swq_out_count(uint32_t swq_id)
{
	uint32_t n, i, j;

	for (n = 0, i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == APP_CORE_NONE)
			continue;

		for (j = 0; j < APP_MAX_SWQ_PER_CORE; j++)
			if (p->swq_out[j] == swq_id)
				n++;
	}

	return n;
}
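
/*
 * Validate the core configuration: the SWQ ID range must be contiguous,
 * every SWQ must have exactly one reader and one writer, and every core
 * that accepts control messages must have valid request and response
 * queue IDs.
 */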
static void
app_check_core_params(void)
{
	uint32_t n_swq_in = app_get_n_swq_in();
	uint32_t n_swq_out = app_get_n_swq_out();
	uint32_t i;

	/* Check that the range of SWQ IDs is contiguous and that each SWQ
	has exactly one reader and one writer */
	if (n_swq_in != n_swq_out)
		rte_panic("Number of input SW queues is not equal to the "
			"number of output SW queues\n");

	for (i = 0; i < n_swq_in; i++) {
		uint32_t n = app_get_swq_in_count(i);

		if (n == 0)
			rte_panic("SW queue %u has no reader\n", i);

		if (n > 1)
			rte_panic("SW queue %u has more than one reader\n", i);
	}

	for (i = 0; i < n_swq_out; i++) {
		uint32_t n = app_get_swq_out_count(i);

		if (n == 0)
			rte_panic("SW queue %u has no writer\n", i);

		if (n > 1)
			rte_panic("SW queue %u has more than one writer\n", i);
	}

	/* Check that the request and response queues are valid */
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		uint32_t ring_id_req, ring_id_resp;

		if ((p->core_type != APP_CORE_FC) &&
			(p->core_type != APP_CORE_FW) &&
			(p->core_type != APP_CORE_RT))
			continue;

		ring_id_req = p->swq_in[APP_SWQ_IN_REQ];
		if (ring_id_req == APP_SWQ_INVALID)
			rte_panic("Core %u of type %u has an invalid request "
				"queue ID\n", p->core_id, p->core_type);

		ring_id_resp = p->swq_out[APP_SWQ_OUT_RESP];
		if (ring_id_resp == APP_SWQ_INVALID)
			rte_panic("Core %u of type %u has an invalid response "
				"queue ID\n", p->core_id, p->core_type);
	}
}
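
/*
 * Return the lcore ID of the first core of the given type, or
 * RTE_MAX_LCORE when no such core is configured.
 */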
uint32_t
app_get_first_core_id(enum app_core_type core_type)
{
	uint32_t i;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];

		if (p->core_type == core_type)
			return p->core_id;
	}

	return RTE_MAX_LCORE;
}
struct rte_ring *
app_get_ring_req(uint32_t core_id)
{
	struct app_core_params *p = app_get_core_params(core_id);
	uint32_t ring_req_id = p->swq_in[APP_SWQ_IN_REQ];

	return app.rings[ring_req_id];
}
struct rte_ring *
app_get_ring_resp(uint32_t core_id)
{
	struct app_core_params *p = app_get_core_params(core_id);
	uint32_t ring_resp_id = p->swq_out[APP_SWQ_OUT_RESP];

	return app.rings[ring_resp_id];
}
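
/*
 * Three mempools are created: the main packet mbuf pool, an indirect
 * mbuf pool (indirect mbufs reference the data of another mbuf, as used
 * when cloning packets), and a pool of control mbufs that carry the
 * request/response messages exchanged with the master core.
 */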
static void
app_init_mbuf_pools(void)
{
	struct rte_pktmbuf_pool_private indirect_mbp_priv;

	/* Init the buffer pool */
	RTE_LOG(INFO, USER1, "Creating the mbuf pool ...\n");
	app.pool = rte_mempool_create(
		"mempool",
		app.pool_size,
		app.pool_buffer_size,
		app.pool_cache_size,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.pool == NULL)
		rte_panic("Cannot create mbuf pool\n");

	/* Init the indirect buffer pool */
	RTE_LOG(INFO, USER1, "Creating the indirect mbuf pool ...\n");
	indirect_mbp_priv.mbuf_data_room_size = 0;
	indirect_mbp_priv.mbuf_priv_size = sizeof(struct app_pkt_metadata);
	app.indirect_pool = rte_mempool_create(
		"indirect mempool",
		app.pool_size,
		sizeof(struct rte_mbuf) + sizeof(struct app_pkt_metadata),
		app.pool_cache_size,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, &indirect_mbp_priv,
		rte_pktmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.indirect_pool == NULL)
		rte_panic("Cannot create indirect mbuf pool\n");

	/* Init the message buffer pool */
	RTE_LOG(INFO, USER1, "Creating the message pool ...\n");
	app.msg_pool = rte_mempool_create(
		"mempool msg",
		app.msg_pool_size,
		app.msg_pool_buffer_size,
		app.msg_pool_cache_size,
		0,
		NULL, NULL,
		rte_ctrlmbuf_init, NULL,
		rte_socket_id(),
		0);
	if (app.msg_pool == NULL)
		rte_panic("Cannot create message pool\n");
}
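
/*
 * One rte_ring is created per SWQ. Because app_check_core_params()
 * guarantees a single reader and a single writer per queue, the rings
 * can use the faster single-producer/single-consumer mode.
 */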
static void
app_init_rings(void)
{
	uint32_t n_swq, i;

	n_swq = app_get_n_swq_in();
	RTE_LOG(INFO, USER1, "Initializing %u SW rings ...\n", n_swq);

	app.rings = rte_malloc_socket(NULL, n_swq * sizeof(struct rte_ring *),
		RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (app.rings == NULL)
		rte_panic("Cannot allocate memory to store ring pointers\n");

	for (i = 0; i < n_swq; i++) {
		struct rte_ring *ring;
		char name[32];

		snprintf(name, sizeof(name), "app_ring_%u", i);

		ring = rte_ring_create(
			name,
			app.rsz_swq,
			rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (ring == NULL)
			rte_panic("Cannot create ring %u\n", i);

		app.rings[i] = ring;
	}
}
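
/*
 * Log the link status of every NIC port and abort if any link is down.
 */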
static void
app_ports_check_link(void)
{
	uint32_t all_ports_up, i;

	all_ports_up = 1;

	for (i = 0; i < app.n_ports; i++) {
		struct rte_eth_link link;
		uint32_t port;

		port = app.ports[i];
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(port, &link);
		RTE_LOG(INFO, USER1, "Port %u (%u Gbps) %s\n",
			port,
			link.link_speed / 1000,
			link.link_status ? "UP" : "DOWN");

		if (link.link_status == 0)
			all_ports_up = 0;
	}

	if (all_ports_up == 0)
		rte_panic("Some NIC ports are DOWN\n");
}
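
/*
 * Each NIC port is configured with one RX queue and one TX queue, put
 * in promiscuous mode and started; the link status of all ports is then
 * verified before initialization continues.
 */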
static void
app_init_ports(void)
{
	uint32_t i;

	/* Init NIC ports, then start the ports */
	for (i = 0; i < app.n_ports; i++) {
		uint32_t port;
		int ret;

		port = app.ports[i];
		RTE_LOG(INFO, USER1, "Initializing NIC port %u ...\n", port);

		/* Init port */
		ret = rte_eth_dev_configure(
			port,
			1,
			1,
			&app.port_conf);
		if (ret < 0)
			rte_panic("Cannot init NIC port %u (%d)\n", port, ret);

		rte_eth_promiscuous_enable(port);

		/* Init RX queues */
		ret = rte_eth_rx_queue_setup(
			port,
			0,
			app.rsz_hwq_rx,
			rte_eth_dev_socket_id(port),
			&app.rx_conf,
			app.pool);
		if (ret < 0)
			rte_panic("Cannot init RX for port %u (%d)\n",
				port, ret);

		/* Init TX queues */
		ret = rte_eth_tx_queue_setup(
			port,
			0,
			app.rsz_hwq_tx,
			rte_eth_dev_socket_id(port),
			&app.tx_conf);
		if (ret < 0)
			rte_panic("Cannot init TX for port %u (%d)\n",
				port, ret);

		/* Start port */
		ret = rte_eth_dev_start(port);
		if (ret < 0)
			rte_panic("Cannot start port %u (%d)\n", port, ret);
	}

	app_ports_check_link();
}
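
/*
 * Liveness check for the packet-processing cores: the master core sends
 * an APP_MSG_REQ_PING request to each of them and busy-waits on the
 * response ring, aborting if no reply arrives within
 * APP_PING_TIMEOUT_SEC seconds (measured in TSC cycles).
 */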
#define APP_PING_TIMEOUT_SEC	5

void
app_ping(void)
{
	uint32_t i;
	uint64_t timestamp, diff_tsc;

	const uint64_t timeout = rte_get_tsc_hz() * APP_PING_TIMEOUT_SEC;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct app_core_params *p = &app.cores[i];
		struct rte_ring *ring_req, *ring_resp;
		void *msg;
		struct app_msg_req *req;
		int status;

		if ((p->core_type != APP_CORE_FC) &&
			(p->core_type != APP_CORE_FW) &&
			(p->core_type != APP_CORE_RT) &&
			(p->core_type != APP_CORE_RX))
			continue;

		ring_req = app_get_ring_req(p->core_id);
		ring_resp = app_get_ring_resp(p->core_id);

		/* Fill request message */
		msg = (void *)rte_ctrlmbuf_alloc(app.msg_pool);
		if (msg == NULL)
			rte_panic("Unable to allocate new message\n");

		req = (struct app_msg_req *)
			rte_ctrlmbuf_data((struct rte_mbuf *)msg);
		req->type = APP_MSG_REQ_PING;

		/* Send request */
		do {
			status = rte_ring_sp_enqueue(ring_req, msg);
		} while (status == -ENOBUFS);

		/* Wait for response */
		timestamp = rte_rdtsc();
		do {
			status = rte_ring_sc_dequeue(ring_resp, &msg);
			diff_tsc = rte_rdtsc() - timestamp;

			if (unlikely(diff_tsc > timeout))
				rte_panic("Core %u of type %d does not respond "
					"to requests\n", p->core_id,
					p->core_type);
		} while (status != 0);

		/* Free message buffer */
		rte_ctrlmbuf_free(msg);
	}
}
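
/*
 * IPv4 fragmentation and reassembly cores operate on IP packets rather
 * than Ethernet frames, so when either core type is configured the
 * Ethernet header is popped on RX and pushed back on TX.
 */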
static void
app_init_etc(void)
{
	if ((app_get_first_core_id(APP_CORE_IPV4_FRAG) != RTE_MAX_LCORE) ||
		(app_get_first_core_id(APP_CORE_IPV4_RAS) != RTE_MAX_LCORE)) {
		RTE_LOG(INFO, USER1,
			"Activating the Ethernet header pop/push ...\n");
		app.ether_hdr_pop_push = 1;
	}
}
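
/*
 * Top-level initialization, intended to run once before the lcores are
 * launched.
 */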
void
app_init(void)
{
	if ((sizeof(struct app_pkt_metadata) % RTE_CACHE_LINE_SIZE) != 0)
		rte_panic("Application pkt meta-data size mismatch\n");

	app_check_core_params();
	app_init_mbuf_pools();
	app_init_rings();
	app_init_ports();
	app_init_etc();

	RTE_LOG(INFO, USER1, "Initialization completed\n");
}