1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
7 #include <rte_common.h>
10 #include <rte_ethdev.h>
11 #include <rte_launch.h>
12 #include <rte_lcore.h>
17 #include <rte_byteorder.h>
22 #include "../include/conf.h"
26 #define SEND_PAUSE_FRAME(port_id, duration) send_pause_frame(port_id, duration)
28 #define SEND_PAUSE_FRAME(port_id, duration) do { } while(0)
31 #define ETHER_TYPE_FLOW_CONTROL 0x8808
/*
 * Payload of an IEEE 802.3x Ethernet flow-control (PAUSE) frame, laid out
 * directly after the Ethernet header in the mbuf; packed so the wire layout
 * is exact.  NOTE(review): the field list is elided in this chunk —
 * send_pause_frame() below writes ->opcode and ->param (both 16-bit,
 * big-endian on the wire); confirm against the full source.
 */
33 struct ether_fc_frame {
36 } __attribute__((__packed__));
/* Watermark thresholds for the per-port rings.  These are pointers —
 * NOTE(review): presumably into shared memory set up by
 * setup_shared_variables() (called from main()) so a control process can
 * tune them at runtime; confirm against conf.h. */
40 unsigned int *low_watermark;
41 unsigned int *high_watermark;
/* port_pairs[p] is the destination port on which packets received on
 * port p are finally transmitted (used by send_stage()). */
43 uint16_t port_pairs[RTE_MAX_ETHPORTS];
/* rings[lcore][port]: one ring per (lcore, port) pair forming the
 * software pipeline between successive lcores. */
45 struct rte_ring *rings[RTE_MAX_LCORE][RTE_MAX_ETHPORTS];
/* Single mbuf pool shared by all ports; created in main(). */
46 struct rte_mempool *mbuf_pool;
/*
 * Build and transmit one IEEE 802.3x PAUSE frame on @port_id, asking the
 * link partner to stop transmitting for @duration (the frame's pause-time
 * field; presumably in units of 512 bit-times per 802.3x — confirm).
 * NOTE(review): this chunk is missing several source lines (function
 * braces, `int ret;`, error-path braces/returns, mbuf length setup) —
 * the comments below cover only the visible code.
 */
49 static void send_pause_frame(uint16_t port_id, uint16_t duration)
51 struct rte_mbuf *mbuf;
52 struct ether_fc_frame *pause_frame;
53 struct rte_ether_hdr *hdr;
54 struct rte_ether_addr mac_addr;
57 RTE_LOG_DP(DEBUG, USER1,
58 "Sending PAUSE frame (duration=%d) on port %d\n",
/* The PAUSE frame's source MAC is the port's own address. */
61 ret = rte_eth_macaddr_get(port_id, &mac_addr);
63 RTE_LOG_DP(ERR, USER1,
64 "Failed to get MAC address (port %u): %s\n",
65 port_id, rte_strerror(-ret));
69 /* Get a mbuf from the pool */
70 mbuf = rte_pktmbuf_alloc(mbuf_pool);
71 if (unlikely(mbuf == NULL))
74 /* Prepare a PAUSE frame */
75 hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
/* Flow-control payload immediately follows the Ethernet header. */
76 pause_frame = (struct ether_fc_frame *) &hdr[1];
78 rte_ether_addr_copy(&mac_addr, &hdr->s_addr);
/*
 * Destination is the reserved flow-control multicast address
 * 01:80:C2:00:00:01 (the constant below, read in little-endian byte
 * order).  NOTE(review): this is an 8-byte store into a 6-byte MAC
 * field — assuming d_addr precedes s_addr in rte_ether_hdr, the two
 * zero upper bytes of the constant overwrite the first two bytes of
 * s_addr, which was copied just above; the cast also violates strict
 * aliasing.  l2fwd writes d_addr before s_addr for this reason —
 * confirm and fix the ordering in the full source.
 */
80 void *tmp = &hdr->d_addr.addr_bytes[0];
81 *((uint64_t *)tmp) = 0x010000C28001ULL;
83 hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_FLOW_CONTROL);
/* Opcode 0x0001 = PAUSE; both fields are big-endian on the wire. */
85 pause_frame->opcode = rte_cpu_to_be_16(0x0001);
86 pause_frame->param = rte_cpu_to_be_16(duration);
/* Fire-and-forget: the tx_burst return count is deliberately ignored —
 * a dropped PAUSE frame is not fatal. */
91 rte_eth_tx_burst(port_id, 0, &mbuf, 1);
/**
 * Get the previous enabled lcore ID.
 *
 * @param lcore_id
 *   The current lcore ID.
 * @return
 *   The previous enabled lcore_id, or -1 if not found (note the unsigned
 *   return type: callers see UINT_MAX in that case).
 */
static unsigned int
get_previous_lcore_id(unsigned int lcore_id)
{
	int i;

	/* Scan downward from the lcore just below the current one. */
	for (i = lcore_id - 1; i >= 0; i--)
		if (rte_lcore_is_enabled(i))
			return i;

	return -1;
}
115 * Get the last enabled lcore ID
118 * The last enabled lcore_id.
121 get_last_lcore_id(void)
125 for (i = RTE_MAX_LCORE; i >= 0; i--)
126 if (rte_lcore_is_enabled(i))
/*
 * RX pipeline stage.  Polls every port selected by `portmask`, pushes the
 * received mbufs onto this lcore's per-port ring, and applies the
 * watermark back-pressure policy: when a ring's occupancy exceeds
 * *high_watermark the port is marked RING_OVERLOADED and a PAUSE frame is
 * requested; the port resumes only once the ring drains below
 * *low_watermark (hysteresis).  Packets that cannot be enqueued are freed
 * (dropped).
 * NOTE(review): several source lines are elided in this chunk (outer
 * while-loop, variable declarations, the guard around the drop loop,
 * closing braces) — comments describe only what is visible.
 */
133 receive_stage(__attribute__((unused)) void *args)
140 unsigned int lcore_id;
143 struct rte_mbuf *pkts[MAX_PKT_QUOTA];
144 struct rte_ring *ring;
/* NOTE(review): `{ RING_READY }` explicitly initializes only element 0;
 * the rest are zero-initialized, which is equivalent only if
 * RING_READY == 0 — confirm against the enum ring_state definition. */
145 enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };
147 lcore_id = rte_lcore_id();
150 "%s() started on core %u\n", __func__, lcore_id);
154 /* Process each port round robin style */
155 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
/* Skip ports not selected on the command line. */
157 if (!is_bit_set(port_id, portmask))
160 ring = rings[lcore_id][port_id];
/* An overloaded port is skipped until its ring has drained below
 * *low_watermark. */
162 if (ring_state[port_id] != RING_READY) {
163 if (rte_ring_count(ring) > *low_watermark)
166 ring_state[port_id] = RING_READY;
169 /* Enqueue received packets on the RX ring */
170 nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
172 ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
/* `free` is the ring's free-slot count reported by the enqueue call;
 * occupancy above *high_watermark triggers back-pressure. */
174 if (RING_SIZE - free > *high_watermark) {
175 ring_state[port_id] = RING_OVERLOADED;
/* 1337 is the requested pause duration (magic value, in the PAUSE
 * frame's pause-time units). */
176 send_pause_frame(port_id, 1337);
182 * Return mbufs to the pool,
183 * effectively dropping packets
185 for (i = 0; i < nb_rx_pkts; i++)
186 rte_pktmbuf_free(pkts[i]);
/*
 * Intermediate pipeline stage, run on each slave lcore between the RX
 * core and the TX core.  For every selected port it dequeues mbufs from
 * the previous lcore's ring (rx) and enqueues them onto its own ring (tx),
 * marking the port RING_OVERLOADED when tx fills past *high_watermark and
 * resuming only after it drains below *low_watermark.  Mbufs that cannot
 * be enqueued on tx are freed (dropped).
 * NOTE(review): several source lines are elided in this chunk (outer
 * while-loop, declarations, guards, closing braces).
 */
193 pipeline_stage(__attribute__((unused)) void *args)
200 unsigned int lcore_id, previous_lcore_id;
203 void *pkts[MAX_PKT_QUOTA];
204 struct rte_ring *rx, *tx;
/* NOTE(review): as in receive_stage(), this relies on RING_READY == 0
 * for elements past index 0 — confirm. */
205 enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };
207 lcore_id = rte_lcore_id();
/* This stage consumes from the rings owned by the previous enabled
 * lcore in the pipeline. */
208 previous_lcore_id = get_previous_lcore_id(lcore_id);
211 "%s() started on core %u - processing packets from core %u\n",
212 __func__, lcore_id, previous_lcore_id);
216 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
218 if (!is_bit_set(port_id, portmask))
221 tx = rings[lcore_id][port_id];
222 rx = rings[previous_lcore_id][port_id];
/* Hysteresis: resume an overloaded port only after tx drains below
 * *low_watermark. */
224 if (ring_state[port_id] != RING_READY) {
225 if (rte_ring_count(tx) > *low_watermark)
228 ring_state[port_id] = RING_READY;
231 /* Dequeue up to quota mbuf from rx */
/* NOTE(review): rte_ring_dequeue_burst() returns an unsigned count, so
 * the `< 0` test below is meaningful only if nb_dq_pkts is declared as
 * a signed type — its declaration is not visible here; confirm. */
232 nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
234 if (unlikely(nb_dq_pkts < 0))
237 /* Enqueue them on tx */
238 ret = rte_ring_enqueue_bulk(tx, pkts,
240 if (RING_SIZE - free > *high_watermark)
241 ring_state[port_id] = RING_OVERLOADED;
246 * Return mbufs to the pool,
247 * effectively dropping packets
249 for (i = 0; i < nb_dq_pkts; i++)
250 rte_pktmbuf_free(pkts[i]);
/*
 * Final TX pipeline stage, run on the last slave lcore.  For every
 * selected port it drains the previous lcore's ring and transmits the
 * mbufs on the paired destination port (port_pairs[]).  No watermark
 * handling here — this stage only empties rings.
 * NOTE(review): source lines are elided in this chunk (outer loop,
 * declarations, closing braces).
 */
259 send_stage(__attribute__((unused)) void *args)
264 uint16_t dest_port_id;
266 unsigned int lcore_id, previous_lcore_id;
269 struct rte_mbuf *tx_pkts[MAX_PKT_QUOTA];
271 lcore_id = rte_lcore_id();
272 previous_lcore_id = get_previous_lcore_id(lcore_id);
275 "%s() started on core %u - processing packets from core %u\n",
276 __func__, lcore_id, previous_lcore_id);
280 /* Process each ring round robin style */
281 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
283 if (!is_bit_set(port_id, portmask))
/* Packets received on port_id leave on its configured pair. */
286 dest_port_id = port_pairs[port_id];
287 tx = rings[previous_lcore_id][port_id];
289 if (rte_ring_empty(tx))
292 /* Dequeue packets from tx and send them */
293 nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
294 (void *) tx_pkts, *quota, NULL);
/* NOTE(review): tx_burst may accept fewer than nb_dq_pkts; mbufs it
 * rejects are neither retried nor freed in the visible code — mbuf
 * leak under TX back-pressure (the TODO below flags this). */
295 rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);
297 /* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
/*
 * Application entry point: initializes the EAL, shared watermark state,
 * the mbuf pool and the selected ports, then launches the pipeline —
 * pipeline_stage() on the middle slave lcores and send_stage() on the
 * last slave lcore (receive_stage() runs on the master core, past the
 * end of this chunk).
 * NOTE(review): this function is truncated in this chunk and several
 * interior lines are elided — comments cover only the visible code.
 */
305 main(int argc, char **argv)
308 unsigned int lcore_id, master_lcore_id, last_lcore_id;
312 rte_log_set_global_level(RTE_LOG_INFO);
314 ret = rte_eal_init(argc, argv);
316 rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");
/* Map the watermark variables (shared with the control process —
 * presumably; confirm against setup_shared_variables()). */
322 setup_shared_variables();
/* Default low watermark: 60% of the ring size.  Set before argument
 * parsing, presumably so parse_qw_args() can override it. */
325 *low_watermark = 60 * RING_SIZE / 100;
327 last_lcore_id = get_last_lcore_id();
328 master_lcore_id = rte_get_master_lcore();
330 /* Parse the application's arguments */
331 ret = parse_qw_args(argc, argv);
333 rte_exit(EXIT_FAILURE, "Invalid quota/watermark argument(s)\n");
335 /* Create a pool of mbuf to store packets */
336 mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
337 MBUF_DATA_SIZE, rte_socket_id());
338 if (mbuf_pool == NULL)
339 rte_panic("%s\n", rte_strerror(rte_errno));
/* Configure each selected port and the master lcore's rings. */
341 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
342 if (is_bit_set(port_id, portmask)) {
343 configure_eth_port(port_id);
344 init_ring(master_lcore_id, port_id);
350 * Start pipeline_stage() on all the available slave lcores
/* (The last slave lcore is excluded by the loop bound: it runs
 * send_stage() instead, launched below.) */
353 for (lcore_id = 0 ; lcore_id < last_lcore_id; lcore_id++) {
354 if (rte_lcore_is_enabled(lcore_id) &&
355 lcore_id != master_lcore_id) {
357 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
358 if (is_bit_set(port_id, portmask))
359 init_ring(lcore_id, port_id);
361 rte_eal_remote_launch(pipeline_stage,
366 /* Start send_stage() on the last slave core */
367 rte_eal_remote_launch(send_stage, NULL, last_lcore_id);
369 /* Start receive_stage() on the master core */