4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <sys/types.h>
44 #include <sys/queue.h>
49 #include <sys/ioctl.h>
52 #include <rte_common.h>
53 #include <rte_eal_memconfig.h>
55 #include <rte_memory.h>
56 #include <rte_memcpy.h>
57 #include <rte_memzone.h>
59 #include <rte_per_lcore.h>
60 #include <rte_launch.h>
61 #include <rte_atomic.h>
62 #include <rte_cycles.h>
63 #include <rte_prefetch.h>
64 #include <rte_lcore.h>
65 #include <rte_per_lcore.h>
66 #include <rte_branch_prediction.h>
67 #include <rte_interrupts.h>
69 #include <rte_random.h>
70 #include <rte_debug.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
74 #include <rte_mempool.h>
76 #include <rte_ivshmem.h>
78 #include "../include/common.h"
#define MAX_RX_QUEUE_PER_LCORE 16
#define MAX_TX_QUEUE_PER_PORT 16

/*
 * Per-lcore forwarding configuration: the list of ports this lcore polls,
 * a per-port staging table of mbufs awaiting bulk-enqueue to the peer's TX
 * ring, and a pointer to each port's shared-memory parameters.
 * NOTE(review): struct mbuf_table and struct vm_port_param are not declared
 * in this chunk -- presumably they come from ../include/common.h; confirm.
 */
struct lcore_queue_conf {
	/* ports polled by this lcore */
	unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
	/* per-port staging buffers for batched TX (see forwarding loop) */
	struct mbuf_table rx_mbufs[RTE_MAX_ETHPORTS];
	/* per-port shared-memory parameters, one per polled port */
	struct vm_port_param * port_param[MAX_RX_QUEUE_PER_LCORE];
} __rte_cache_aligned; /* cache-aligned: one instance per lcore, avoids false sharing */
static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
/* Print out statistics on packets dropped */
/*
 * NOTE(review): the signature line of this statistics-printing function is
 * not visible in this chunk (lines were dropped from the paste); the body
 * below is reproduced as-is.
 */
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;

total_packets_dropped = 0;
/* NOTE(review): total_packets_tx and total_packets_rx are accumulated
 * below but their zero-initialisation is not visible in this chunk --
 * confirm against the full source; an uninitialised read would be UB. */

/* ANSI escapes: ESC[2J clears the terminal, ESC[1;1H homes the cursor */
const char clr[] = { 27, '[', '2', 'J', '\0' };
const char topLeft[] = { 27, '[', '1', ';', '1', 'H','\0' };

/* Clear screen and move to top left */
printf("%s%s", clr, topLeft);

printf("\nPort statistics ====================================");

/* per-port counters live in the shared ivshmem control structure */
for (portid = 0; portid < ctrl->nb_ports; portid++) {
	/* skip ports that are not enabled */
	/* NOTE(review): this format string has four conversions but only
	 * three visible arguments -- a `portid` argument line appears to
	 * have been dropped from this chunk; confirm against full source. */
	printf("\nStatistics for port %u ------------------------------"
		"\nPackets sent: %24"PRIu64
		"\nPackets received: %20"PRIu64
		"\nPackets dropped: %21"PRIu64,
		ctrl->vm_ports[portid].stats.tx,
		ctrl->vm_ports[portid].stats.rx,
		ctrl->vm_ports[portid].stats.dropped);

	total_packets_dropped += ctrl->vm_ports[portid].stats.dropped;
	total_packets_tx += ctrl->vm_ports[portid].stats.tx;
	total_packets_rx += ctrl->vm_ports[portid].stats.rx;

/* NOTE(review): the aggregate printf below has four conversions but only
 * one visible argument -- the total_packets_tx / total_packets_rx
 * argument lines appear to have been dropped from this chunk. */
printf("\nAggregate statistics ==============================="
	"\nTotal packets sent: %18"PRIu64
	"\nTotal packets received: %14"PRIu64
	"\nTotal packets dropped: %15"PRIu64,
	total_packets_dropped);
printf("\n====================================================\n");
/*
 * Print usage help for the application's options after the EAL "--"
 * separator (-q queues per lcore, -T stats refresh period).
 * @prgname: argv[0], echoed at the start of the usage line.
 * NOTE(review): the return-type line, opening brace, and the printf's
 * trailing argument/closing are not visible in this chunk.
 */
l2fwd_ivshmem_usage(const char *prgname)
	printf("%s [EAL options] -- [-q NQ -T PERIOD]\n"
	       " -q NQ: number of queue (=ports) per lcore (default is 1)\n"
	       " -T PERIOD: statistics will be refreshed each PERIOD seconds (0 to disable, 10 default, 86400 maximum)\n",
/*
 * Parse the -q argument: number of queues (= ports) per lcore.
 * The caller in parse_args treats a return value of 0 as invalid input.
 * NOTE(review): the return-type line, local declarations, return
 * statements and closing brace are not visible in this chunk.
 */
l2fwd_ivshmem_parse_nqueue(const char *q_arg)
	/* parse decimal string (base 10 -- the previous comment incorrectly
	 * said "hexadecimal") */
	n = strtoul(q_arg, &end, 10);
	/* reject empty input and trailing non-digit characters */
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
	/* must fit within the per-lcore rx_port_list capacity */
	if (n >= MAX_RX_QUEUE_PER_LCORE)
/*
 * Parse the -T argument: statistics refresh period in seconds.
 * The caller multiplies the result into TSC tick units and treats a
 * negative result as invalid input.
 * NOTE(review): the return-type line, local declarations, return
 * statements and closing brace are not visible in this chunk.
 */
l2fwd_ivshmem_parse_timer_period(const char *q_arg)
	/* parse number string (base 10) */
	n = strtol(q_arg, &end, 10);
	/* reject empty input and trailing non-digit characters */
	if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0'))
	/* bound the period -- MAX_TIMER_PERIOD presumably comes from
	 * ../include/common.h; not visible in this chunk */
	if (n >= MAX_TIMER_PERIOD)
/* Parse the argument given in the command line of the application */
/*
 * Handles the options after the EAL "--" separator: -q (queues per lcore)
 * and -T (stats timer period, seconds). On bad input, prints a message
 * and the usage text.
 * NOTE(review): several lines (return type, local declarations, switch
 * labels, returns and braces) are not visible in this chunk; the body
 * below is reproduced as-is.
 */
l2fwd_ivshmem_parse_args(int argc, char **argv)
	char *prgname = argv[0];
	static struct option lgopts[] = {

	/* "q:p:T:" -- each short option takes a required argument */
	while ((opt = getopt_long(argc, argvopt, "q:p:T:",
			lgopts, &option_index)) != EOF) {

		l2fwd_ivshmem_rx_queue_per_lcore = l2fwd_ivshmem_parse_nqueue(optarg);
		if (l2fwd_ivshmem_rx_queue_per_lcore == 0) {
			printf("invalid queue number\n");
			l2fwd_ivshmem_usage(prgname);

		/* convert the parsed seconds into TSC-millisecond units;
		 * a negative parse result stays negative and is caught below */
		timer_period = l2fwd_ivshmem_parse_timer_period(optarg) * 1000 * TIMER_MILLISECOND;
		if (timer_period < 0) {
			printf("invalid timer period\n");
			l2fwd_ivshmem_usage(prgname);

		l2fwd_ivshmem_usage(prgname);

		l2fwd_ivshmem_usage(prgname);

	/* put the program name back in front of the remaining arguments */
	argv[optind-1] = prgname;

	optind = 0; /* reset getopt lib */
/*
 * this loop is getting packets from RX rings of each port, and puts them
 * into TX rings of destination ports.
 */
/* NOTE(review): the function signature for this forwarding loop is not
 * visible in this chunk, and a number of lines (braces, some statements
 * such as the declaration/assignment of `m` and `tmp`) appear to have
 * been dropped from the paste; the body below is reproduced as-is. */
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_mbuf **m_table;
struct rte_ring *rx, *tx;
unsigned lcore_id, len;
uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
unsigned i, j, portid, nb_rx;
struct lcore_queue_conf *qconf;
struct ether_hdr *eth;

lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];

/* lcores with no assigned ports have nothing to poll */
if (qconf->n_rx_port == 0) {
	RTE_LOG(INFO, L2FWD_IVSHMEM, "lcore %u has nothing to do\n", lcore_id);

RTE_LOG(INFO, L2FWD_IVSHMEM, "entering main loop on lcore %u\n", lcore_id);

for (i = 0; i < qconf->n_rx_port; i++) {
	portid = qconf->rx_port_list[i];
	RTE_LOG(INFO, L2FWD_IVSHMEM, " -- lcoreid=%u portid=%u\n", lcore_id,

/* run until the shared control structure leaves the forwarding state */
while (ctrl->state == STATE_FWD) {
	cur_tsc = rte_rdtsc();

	diff_tsc = cur_tsc - prev_tsc;

	/*
	 * Read packet from RX queues and send it to TX queues
	 */
	for (i = 0; i < qconf->n_rx_port; i++) {

		portid = qconf->rx_port_list[i];

		/* number of mbufs already staged for this port */
		len = qconf->rx_mbufs[portid].len;

		rx = ctrl->vm_ports[portid].rx_ring;
		tx = ctrl->vm_ports[portid].dst->tx_ring;

		m_table = qconf->rx_mbufs[portid].m_table;

		/* if we have something in the queue, try and transmit it down */

		/* if we succeed in sending the packets down, mark queue as free */
		if (rte_ring_enqueue_bulk(tx, (void**) m_table, len) == 0) {
			ctrl->vm_ports[portid].stats.tx += len;
			qconf->rx_mbufs[portid].len = 0;

		nb_rx = rte_ring_count(rx);

		/* cap the burst at the pkts_burst array capacity */
		nb_rx = RTE_MIN(nb_rx, (unsigned) MAX_PKT_BURST);

		/* if we can get packets into the m_table */
		if (nb_rx < (RTE_DIM(qconf->rx_mbufs[portid].m_table) - len)) {

			/* this situation cannot exist, so if we fail to dequeue, that
			 * means something went horribly wrong, hence the failure. */
			if (rte_ring_dequeue_bulk(rx, (void**) pkts_burst, nb_rx) < 0) {
				ctrl->state = STATE_FAIL;

			ctrl->vm_ports[portid].stats.rx += nb_rx;

			/* put packets into the queue */
			for (j = 0; j < nb_rx; j++) {

				/* prefetch packet data ahead of the header rewrite */
				rte_prefetch0(rte_pktmbuf_mtod(m, void *));

				/* append behind the mbufs already staged */
				m_table[len + j] = m;

				eth = rte_pktmbuf_mtod(m, struct ether_hdr *);

				/* 02:00:00:00:00:xx */
				tmp = &eth->d_addr.addr_bytes[0];
				/* NOTE(review): this 8-byte store into the 6-byte
				 * destination-MAC field deliberately spills into the
				 * adjacent source-address bytes, which ether_addr_copy()
				 * below then overwrites; it also assumes little-endian
				 * byte order and tolerates unaligned access -- confirm
				 * on non-x86 targets. */
				*((uint64_t *)tmp) = 0x000000000002 + ((uint64_t)portid << 40);

				ether_addr_copy(&ctrl->vm_ports[portid].dst->ethaddr,

			qconf->rx_mbufs[portid].len += nb_rx;

	/* if timer is enabled */
	if (timer_period > 0) {

		/* advance the timer */
		timer_tsc += diff_tsc;

		/* if timer has reached its timeout */
		if (unlikely(timer_tsc >= (uint64_t) timer_period)) {

			/* do this only on master core */
			if (lcore_id == rte_get_master_lcore()) {

				/* reset the timer */
370 l2fwd_ivshmem_launch_one_lcore(__attribute__((unused)) void *dummy)
/*
 * Application entry point: initialise the EAL, parse the application
 * options, attach to the ivshmem control memzone shared with the host,
 * assign ports to lcores under the control-structure spinlock, then
 * launch the forwarding loop on every lcore.
 * NOTE(review): this chunk ends mid-function, and several lines
 * (return type, declarations, braces, error checks on `ret`/`mz`) appear
 * to have been dropped from the paste; the body is reproduced as-is.
 */
main(int argc, char **argv)
	struct lcore_queue_conf *qconf;
	const struct rte_memzone * mz;
	unsigned rx_lcore_id, lcore_id;

	/* init EAL; it consumes its own arguments up to the "--" separator */
	ret = rte_eal_init(argc, argv);
		rte_exit(EXIT_FAILURE, "Invalid EAL arguments\n");

	/* parse application arguments (after the EAL ones) */
	ret = l2fwd_ivshmem_parse_args(argc, argv);
		rte_exit(EXIT_FAILURE, "Invalid l2fwd-ivshmem arguments\n");

	/* find control structure */
	mz = rte_memzone_lookup(CTRL_MZ_NAME);
		rte_exit(EXIT_FAILURE, "Cannot find control memzone\n");

	ctrl = (struct ivshmem_ctrl*) mz->addr;

	/* lock the ctrl so that we don't have conflicts with anything else */
	rte_spinlock_lock(&ctrl->lock);

	if (ctrl->state == STATE_FWD)
		rte_exit(EXIT_FAILURE, "Forwarding already started!\n");

	/* Initialize the port/queue configuration of each logical core */
	for (portid = 0; portid < ctrl->nb_ports; portid++) {

		/* get the lcore_id for this port: skip disabled lcores and
		 * lcores that already have their full quota of ports */
		while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
				lcore_queue_conf[rx_lcore_id].n_rx_port ==
				l2fwd_ivshmem_rx_queue_per_lcore) {

			if (rx_lcore_id >= RTE_MAX_LCORE)
				rte_exit(EXIT_FAILURE, "Not enough cores\n");

		if (qconf != &lcore_queue_conf[rx_lcore_id])
			/* Assigned a new logical core in the loop above. */
			qconf = &lcore_queue_conf[rx_lcore_id];

		qconf->rx_port_list[qconf->n_rx_port] = portid;
		qconf->port_param[qconf->n_rx_port] = &ctrl->vm_ports[portid];

		printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);

	/* indicate that we are ready to forward */
	ctrl->state = STATE_FWD;

	/* configuration is published; release the shared lock */
	rte_spinlock_unlock(&ctrl->lock);

	/* launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(l2fwd_ivshmem_launch_one_lcore, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0