/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdlib.h>

#include <rte_eal.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_ring.h>

#include <rte_byteorder.h>

#include "args.h"
#include "main.h"
#include "init.h"
#include "../include/conf.h"
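/*
 * Optional software flow control: when QW_SOFTWARE_FC is defined,
 * SEND_PAUSE_FRAME() expands to a real PAUSE frame transmission;
 * otherwise it expands to a no-op.
 */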
#ifdef QW_SOFTWARE_FC
#define SEND_PAUSE_FRAME(port_id, duration) send_pause_frame(port_id, duration)
#else
#define SEND_PAUSE_FRAME(port_id, duration) do { } while(0)
#endif
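/*
 * IEEE 802.3x flow control: a PAUSE frame is identified by EtherType
 * 0x8808 and carries a 16-bit opcode (0x0001 for PAUSE) followed by a
 * 16-bit pause duration expressed in units of 512 bit times.
 */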
#define ETHER_TYPE_FLOW_CONTROL 0x8808

struct ether_fc_frame {
	uint16_t opcode;
	uint16_t param;
} __attribute__((__packed__));
int *quota;
unsigned int *low_watermark;

uint8_t port_pairs[RTE_MAX_ETHPORTS];

struct rte_ring *rings[RTE_MAX_LCORE][RTE_MAX_ETHPORTS];
struct rte_mempool *mbuf_pool;
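/*
 * quota and low_watermark point into a shared memory zone created by
 * setup_shared_variables(), so the companion qwctl application can tune
 * them while the pipeline is running. rings[lcore][port] is the ring a
 * given lcore enqueues to for a given port; each stage dequeues from the
 * ring of the previous enabled lcore.
 */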
static void send_pause_frame(uint8_t port_id, uint16_t duration)
{
	struct rte_mbuf *mbuf;
	struct ether_fc_frame *pause_frame;
	struct ether_hdr *hdr;
	struct ether_addr mac_addr;

	RTE_LOG_DP(DEBUG, USER1,
			"Sending PAUSE frame (duration=%d) on port %d\n",
			duration, port_id);

	/* Get a mbuf from the pool */
	mbuf = rte_pktmbuf_alloc(mbuf_pool);
	if (unlikely(mbuf == NULL))
		return;
	/* Prepare a PAUSE frame */
	hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	pause_frame = (struct ether_fc_frame *) &hdr[1];

	rte_eth_macaddr_get(port_id, &mac_addr);
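	/*
	 * The destination is the reserved 802.3x PAUSE multicast address
	 * 01:80:C2:00:00:01, written as a single 64-bit little-endian
	 * store. The store spills two zero bytes past d_addr, so the
	 * source MAC is copied in only afterwards.
	 */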
	void *tmp = &hdr->d_addr.addr_bytes[0];
	*((uint64_t *)tmp) = 0x010000C28001ULL;

	ether_addr_copy(&mac_addr, &hdr->s_addr);

	hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_FLOW_CONTROL);
	pause_frame->opcode = rte_cpu_to_be_16(0x0001);
	pause_frame->param = rte_cpu_to_be_16(duration);

	/* Pad to the minimum Ethernet frame size */
	mbuf->pkt_len = 60;
	mbuf->data_len = 60;

	rte_eth_tx_burst(port_id, 0, &mbuf, 1);
}
/**
 * Get the previous enabled lcore ID
 *
 * @param lcore_id
 *   The current lcore ID.
 * @return
 *   The previous enabled lcore_id or -1 if not found.
 */
static unsigned int
get_previous_lcore_id(unsigned int lcore_id)
{
	int i;

	for (i = lcore_id - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;

	return -1;
}
/**
 * Get the last enabled lcore ID
 *
 * @return
 *   The last enabled lcore_id.
 */
static unsigned int
get_last_lcore_id(void)
{
	int i;

	for (i = RTE_MAX_LCORE - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;

	return 0;
}
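/*
 * Pipeline layout: the master lcore runs receive_stage(), intermediate
 * slave lcores run pipeline_stage(), and the last slave lcore runs
 * send_stage(). Stages are connected by per-port rings. When an enqueue
 * returns -EDQUOT (ring watermark exceeded), the ring is marked
 * RING_OVERLOADED and its port is skipped until the ring drains below
 * *low_watermark.
 */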
static void
receive_stage(__attribute__((unused)) void *args)
{
	int i, ret;

	uint8_t port_id;
	uint16_t nb_rx_pkts;

	unsigned int lcore_id;

	struct rte_mbuf *pkts[MAX_PKT_QUOTA];
	struct rte_ring *ring;
	enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };

	lcore_id = rte_lcore_id();

	RTE_LOG(INFO, USER1,
			"%s() started on core %u\n", __func__, lcore_id);

	while (1) {

		/* Process each port round robin style */
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {

			if (!is_bit_set(port_id, portmask))
				continue;

			ring = rings[lcore_id][port_id];
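			/*
			 * An overloaded ring is skipped until it has drained
			 * below the low watermark, giving the PAUSE-based
			 * back pressure time to take effect.
			 */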
			if (ring_state[port_id] != RING_READY) {
				if (rte_ring_count(ring) > *low_watermark)
					continue;
				else
					ring_state[port_id] = RING_READY;
			}

			/* Enqueue received packets on the RX ring */
			nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
					(uint16_t) *quota);
			ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
					nb_rx_pkts);
			if (ret == -EDQUOT) {
				ring_state[port_id] = RING_OVERLOADED;
				send_pause_frame(port_id, 1337);
			}

			else if (ret == -ENOBUFS) {

				/*
				 * Return mbufs to the pool,
				 * effectively dropping packets
				 */
				for (i = 0; i < nb_rx_pkts; i++)
					rte_pktmbuf_free(pkts[i]);
			}
		}
	}
}
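/*
 * The pipeline stage does no packet processing of its own: it moves
 * mbufs from the previous lcore's ring to its own, applying the same
 * watermark-based back pressure as the receive stage. A real
 * application would hook its per-packet work in here.
 */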
static void
pipeline_stage(__attribute__((unused)) void *args)
{
	int i, ret;
	int nb_dq_pkts;

	uint8_t port_id;

	unsigned int lcore_id, previous_lcore_id;

	void *pkts[MAX_PKT_QUOTA];
	struct rte_ring *rx, *tx;
	enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };

	lcore_id = rte_lcore_id();
	previous_lcore_id = get_previous_lcore_id(lcore_id);

	RTE_LOG(INFO, USER1,
			"%s() started on core %u - processing packets from core %u\n",
			__func__, lcore_id, previous_lcore_id);

	while (1) {

		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {

			if (!is_bit_set(port_id, portmask))
				continue;

			tx = rings[lcore_id][port_id];
			rx = rings[previous_lcore_id][port_id];
			if (ring_state[port_id] != RING_READY) {
				if (rte_ring_count(tx) > *low_watermark)
					continue;
				else
					ring_state[port_id] = RING_READY;
			}

			/* Dequeue up to quota mbufs from rx */
			nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
					*quota);
			if (unlikely(nb_dq_pkts < 0))
				continue;

			/* Enqueue them on tx */
			ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
			if (ret == -EDQUOT)
				ring_state[port_id] = RING_OVERLOADED;

			else if (ret == -ENOBUFS) {

				/*
				 * Return mbufs to the pool,
				 * effectively dropping packets
				 */
				for (i = 0; i < nb_dq_pkts; i++)
					rte_pktmbuf_free(pkts[i]);
			}
		}
	}
}
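/*
 * send_stage() drains the last ring of each port and transmits the
 * packets on the paired port (port_pairs[] is filled at init time), so
 * traffic received on one port of a pair leaves on the other.
 */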
static void
send_stage(__attribute__((unused)) void *args)
{
	uint16_t nb_dq_pkts;

	uint8_t port_id;
	uint8_t dest_port_id;

	unsigned int lcore_id, previous_lcore_id;

	struct rte_ring *tx;
	struct rte_mbuf *tx_pkts[MAX_PKT_QUOTA];

	lcore_id = rte_lcore_id();
	previous_lcore_id = get_previous_lcore_id(lcore_id);

	RTE_LOG(INFO, USER1,
			"%s() started on core %u - processing packets from core %u\n",
			__func__, lcore_id, previous_lcore_id);

	while (1) {

		/* Process each ring round robin style */
		for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {

			if (!is_bit_set(port_id, portmask))
				continue;

			dest_port_id = port_pairs[port_id];
			tx = rings[previous_lcore_id][port_id];

			if (rte_ring_empty(tx))
				continue;
			/* Dequeue packets from tx and send them */
			nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
					(void *) tx_pkts, *quota);
			rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);

			/* TODO: check the rte_eth_tx_burst() return value and
			 * free any mbufs that were not sent. */
		}
	}
}
int
main(int argc, char **argv)
{
	int ret;
	unsigned int lcore_id, master_lcore_id, last_lcore_id;

	uint8_t port_id;

	rte_set_log_level(RTE_LOG_INFO);

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");

	argc -= ret;
	argv += ret;

	init_dpdk();
	setup_shared_variables();

	/* Default quota and watermark; may be overridden at runtime */
	*quota = 32;
	*low_watermark = 60 * RING_SIZE / 100;

	last_lcore_id = get_last_lcore_id();
	master_lcore_id = rte_get_master_lcore();

	/* Parse the application's arguments */
	ret = parse_qw_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid quota/watermark argument(s)\n");

	/* Create a pool of mbufs to store packets */
	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
			MBUF_DATA_SIZE, rte_socket_id());
	if (mbuf_pool == NULL)
		rte_panic("%s\n", rte_strerror(rte_errno));

	for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
		if (is_bit_set(port_id, portmask)) {
			configure_eth_port(port_id);
			init_ring(master_lcore_id, port_id);
		}

	/* Pair the ports so each one forwards to its partner */
	pair_ports();
	/*
	 * Start pipeline_stage() on all the available slave lcores
	 * but the last
	 */
	for (lcore_id = 0; lcore_id < last_lcore_id; lcore_id++) {
		if (rte_lcore_is_enabled(lcore_id) &&
				lcore_id != master_lcore_id) {

			for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
				if (is_bit_set(port_id, portmask))
					init_ring(lcore_id, port_id);

			/* typecast is a workaround for GCC 4.3 bug */
			rte_eal_remote_launch((int (*)(void *))pipeline_stage,
					NULL, lcore_id);
		}
	}
	/* Start send_stage() on the last slave core */
	/* typecast is a workaround for GCC 4.3 bug */
	rte_eal_remote_launch((int (*)(void *))send_stage, NULL, last_lcore_id);

	/* Start receive_stage() on the master core */
	receive_stage(NULL);

	return 0;
}
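/*
 * Example invocation (assuming four lcores and two ports, 0 and 2, i.e.
 * a port mask of 0x5 - adjust the EAL and -p arguments to your setup):
 *
 *   ./qw -c 0xf -n 4 -- -p 0x5
 *
 * The companion qwctl application can then adjust the quota and
 * watermark values at runtime through the shared memory zone.
 */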