examples: check status of getting MAC address
[dpdk.git] / examples / quota_watermark / qw / main.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  */
4
5 #include <rte_eal.h>
6
7 #include <rte_common.h>
8 #include <rte_debug.h>
9 #include <rte_errno.h>
10 #include <rte_ethdev.h>
11 #include <rte_launch.h>
12 #include <rte_lcore.h>
13 #include <rte_log.h>
14 #include <rte_mbuf.h>
15 #include <rte_ring.h>
16
17 #include <rte_byteorder.h>
18
19 #include "args.h"
20 #include "main.h"
21 #include "init.h"
22 #include "../include/conf.h"
23
24
/*
 * When software flow control is compiled in, SEND_PAUSE_FRAME() expands to a
 * real call; otherwise it is a no-op, so callers need no #ifdefs of their own.
 */
#ifdef QW_SOFTWARE_FC
#define SEND_PAUSE_FRAME(port_id, duration) send_pause_frame(port_id, duration)
#else
#define SEND_PAUSE_FRAME(port_id, duration) do { } while(0)
#endif

/* EtherType of IEEE 802.3x MAC control (flow control / PAUSE) frames */
#define ETHER_TYPE_FLOW_CONTROL 0x8808

/*
 * Payload of a MAC control frame, laid out directly after the Ethernet
 * header: a 16-bit opcode followed by a 16-bit parameter (the pause time).
 * Packed so the on-wire layout is exact.
 */
struct ether_fc_frame {
        uint16_t opcode;
        uintint16_t param;
} __attribute__((__packed__));
37
38
/*
 * Shared-memory knobs (mapped by setup_shared_variables() so a companion
 * control process can tune them at runtime):
 *   quota          - max packets moved per burst at each pipeline stage
 *   low_watermark  - ring fill level below which a throttled ring resumes
 *   high_watermark - ring fill level above which a ring is flagged overloaded
 */
int *quota;
unsigned int *low_watermark;
unsigned int *high_watermark;

/* port_pairs[p] is the egress port for traffic received on port p */
uint16_t port_pairs[RTE_MAX_ETHPORTS];

/* One ring per (lcore, port): stage N reads rings[prev_lcore][port],
 * writes rings[own_lcore][port]. */
struct rte_ring *rings[RTE_MAX_LCORE][RTE_MAX_ETHPORTS];
struct rte_mempool *mbuf_pool;
47
48
49 static void send_pause_frame(uint16_t port_id, uint16_t duration)
50 {
51         struct rte_mbuf *mbuf;
52         struct ether_fc_frame *pause_frame;
53         struct rte_ether_hdr *hdr;
54         struct rte_ether_addr mac_addr;
55         int ret;
56
57         RTE_LOG_DP(DEBUG, USER1,
58                         "Sending PAUSE frame (duration=%d) on port %d\n",
59                         duration, port_id);
60
61         ret = rte_eth_macaddr_get(port_id, &mac_addr);
62         if (ret != 0) {
63                 RTE_LOG_DP(ERR, USER1,
64                                 "Failed to get MAC address (port %u): %s\n",
65                                 port_id, rte_strerror(-ret));
66                 return;
67         }
68
69         /* Get a mbuf from the pool */
70         mbuf = rte_pktmbuf_alloc(mbuf_pool);
71         if (unlikely(mbuf == NULL))
72                 return;
73
74         /* Prepare a PAUSE frame */
75         hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
76         pause_frame = (struct ether_fc_frame *) &hdr[1];
77
78         rte_ether_addr_copy(&mac_addr, &hdr->s_addr);
79
80         void *tmp = &hdr->d_addr.addr_bytes[0];
81         *((uint64_t *)tmp) = 0x010000C28001ULL;
82
83         hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_FLOW_CONTROL);
84
85         pause_frame->opcode = rte_cpu_to_be_16(0x0001);
86         pause_frame->param  = rte_cpu_to_be_16(duration);
87
88         mbuf->pkt_len  = 60;
89         mbuf->data_len = 60;
90
91         rte_eth_tx_burst(port_id, 0, &mbuf, 1);
92 }
93
94 /**
95  * Get the previous enabled lcore ID
96  *
97  * @param lcore_id
98  *   The current lcore ID.
99  * @return
100  *   The previous enabled lcore_id or -1 if not found.
101  */
/**
 * Get the previous enabled lcore ID
 *
 * @param lcore_id
 *   The current lcore ID.
 * @return
 *   The previous enabled lcore_id or -1 if not found.
 */
static unsigned int
get_previous_lcore_id(unsigned int lcore_id)
{
        int candidate = (int) lcore_id - 1;

        /* Walk downwards until an enabled lcore is found */
        while (candidate >= 0) {
                if (rte_lcore_is_enabled(candidate))
                        return candidate;
                candidate--;
        }

        return -1;
}
113
114 /**
115  * Get the last enabled lcore ID
116  *
117  * @return
118  *   The last enabled lcore_id.
119  */
120 static unsigned int
121 get_last_lcore_id(void)
122 {
123         int i;
124
125         for (i = RTE_MAX_LCORE; i >= 0; i--)
126                 if (rte_lcore_is_enabled(i))
127                         return i;
128
129         return 0;
130 }
131
/*
 * First pipeline stage, run on the master lcore: poll every enabled port,
 * push received packets onto this lcore's per-port ring, and emit a PAUSE
 * frame whenever a ring crosses the high watermark. Never returns.
 */
static void
receive_stage(__attribute__((unused)) void *args)
{
        int i, ret;

        uint16_t port_id;
        uint16_t nb_rx_pkts;

        unsigned int lcore_id;
        unsigned int free;

        struct rte_mbuf *pkts[MAX_PKT_QUOTA];
        struct rte_ring *ring;
        /* Per-port throttle state; only element 0 is explicitly RING_READY,
         * the rest are zero-initialized (NOTE(review): assumes RING_READY
         * is the zero enumerator — confirm in ../include/conf.h). */
        enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };

        lcore_id = rte_lcore_id();

        RTE_LOG(INFO, USER1,
                        "%s() started on core %u\n", __func__, lcore_id);

        while (1) {

                /* Process each port round robin style */
                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {

                        if (!is_bit_set(port_id, portmask))
                                continue;

                        ring = rings[lcore_id][port_id];

                        /* An overloaded ring stays throttled until it drains
                         * back below the low watermark (hysteresis). */
                        if (ring_state[port_id] != RING_READY) {
                                if (rte_ring_count(ring) > *low_watermark)
                                        continue;
                                else
                                        ring_state[port_id] = RING_READY;
                        }

                        /* Enqueue received packets on the RX ring */
                        nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
                                        (uint16_t) *quota);
                        /* Bulk enqueue is all-or-nothing: ret is nb_rx_pkts
                         * on success, 0 on failure; free gets the remaining
                         * ring capacity either way. */
                        ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
                                        nb_rx_pkts, &free);
                        /* Ring occupancy = RING_SIZE - free; above the high
                         * watermark, throttle and ask the peer to pause. */
                        if (RING_SIZE - free > *high_watermark) {
                                ring_state[port_id] = RING_OVERLOADED;
                                send_pause_frame(port_id, 1337);
                        }

                        if (ret == 0) {

                                /*
                                 * Return  mbufs to the pool,
                                 * effectively dropping packets
                                 */
                                for (i = 0; i < nb_rx_pkts; i++)
                                        rte_pktmbuf_free(pkts[i]);
                        }
                }
        }
}
191
/*
 * Intermediate pipeline stage, run on every slave lcore except the last:
 * move packets from the previous lcore's per-port ring to this lcore's,
 * applying the same high/low watermark hysteresis as receive_stage()
 * (minus the PAUSE frame). Loops forever; the trailing return is unreachable.
 */
static int
pipeline_stage(__attribute__((unused)) void *args)
{
        int i, ret;
        int nb_dq_pkts;

        uint16_t port_id;

        unsigned int lcore_id, previous_lcore_id;
        unsigned int free;

        void *pkts[MAX_PKT_QUOTA];
        struct rte_ring *rx, *tx;
        enum ring_state ring_state[RTE_MAX_ETHPORTS] = { RING_READY };

        lcore_id = rte_lcore_id();
        previous_lcore_id = get_previous_lcore_id(lcore_id);

        RTE_LOG(INFO, USER1,
                        "%s() started on core %u - processing packets from core %u\n",
                        __func__, lcore_id, previous_lcore_id);

        while (1) {

                for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {

                        if (!is_bit_set(port_id, portmask))
                                continue;

                        tx = rings[lcore_id][port_id];
                        rx = rings[previous_lcore_id][port_id];

                        /* Throttled tx ring: wait until it drains below the
                         * low watermark before resuming (hysteresis). */
                        if (ring_state[port_id] != RING_READY) {
                                if (rte_ring_count(tx) > *low_watermark)
                                        continue;
                                else
                                        ring_state[port_id] = RING_READY;
                        }

                        /* Dequeue up to quota mbuf from rx */
                        nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
                                        *quota, NULL);
                        /* NOTE(review): rte_ring_dequeue_burst() returns an
                         * unsigned count, so this branch is dead code. */
                        if (unlikely(nb_dq_pkts < 0))
                                continue;

                        /* Enqueue them on tx */
                        ret = rte_ring_enqueue_bulk(tx, pkts,
                                        nb_dq_pkts, &free);
                        /* Ring occupancy = RING_SIZE - free */
                        if (RING_SIZE - free > *high_watermark)
                                ring_state[port_id] = RING_OVERLOADED;

                        if (ret == 0) {

                                /*
                                 * Return  mbufs to the pool,
                                 * effectively dropping packets
                                 */
                                for (i = 0; i < nb_dq_pkts; i++)
                                        rte_pktmbuf_free(pkts[i]);
                        }
                }
        }

        return 0;
}
257
258 static int
259 send_stage(__attribute__((unused)) void *args)
260 {
261         uint16_t nb_dq_pkts;
262
263         uint16_t port_id;
264         uint16_t dest_port_id;
265
266         unsigned int lcore_id, previous_lcore_id;
267
268         struct rte_ring *tx;
269         struct rte_mbuf *tx_pkts[MAX_PKT_QUOTA];
270
271         lcore_id = rte_lcore_id();
272         previous_lcore_id = get_previous_lcore_id(lcore_id);
273
274         RTE_LOG(INFO, USER1,
275                         "%s() started on core %u - processing packets from core %u\n",
276                         __func__, lcore_id, previous_lcore_id);
277
278         while (1) {
279
280                 /* Process each ring round robin style */
281                 for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
282
283                         if (!is_bit_set(port_id, portmask))
284                                 continue;
285
286                         dest_port_id = port_pairs[port_id];
287                         tx = rings[previous_lcore_id][port_id];
288
289                         if (rte_ring_empty(tx))
290                                 continue;
291
292                         /* Dequeue packets from tx and send them */
293                         nb_dq_pkts = (uint16_t) rte_ring_dequeue_burst(tx,
294                                         (void *) tx_pkts, *quota, NULL);
295                         rte_eth_tx_burst(dest_port_id, 0, tx_pkts, nb_dq_pkts);
296
297                         /* TODO: Check if nb_dq_pkts == nb_tx_pkts? */
298                 }
299         }
300
301         return 0;
302 }
303
/*
 * Program entry point: initialize EAL and the NICs, map the shared
 * quota/watermark variables, then pin one pipeline stage per enabled lcore
 * (receive on the master, send on the last slave, pipeline_stage on the
 * rest) and run receive_stage() forever on the master core.
 */
int
main(int argc, char **argv)
{
        int ret;
        unsigned int lcore_id, master_lcore_id, last_lcore_id;

        uint16_t port_id;

        rte_log_set_global_level(RTE_LOG_INFO);

        ret = rte_eal_init(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Cannot initialize EAL\n");

        /* Skip the EAL arguments consumed by rte_eal_init() */
        argc -= ret;
        argv += ret;

        init_dpdk();
        setup_shared_variables();

        /* Defaults, overridable below by parse_qw_args(); presumably
         * *high_watermark is set there too — it is not initialized here. */
        *quota = 32;
        *low_watermark = 60 * RING_SIZE / 100;

        last_lcore_id   = get_last_lcore_id();
        master_lcore_id = rte_get_master_lcore();

        /* Parse the application's arguments */
        ret = parse_qw_args(argc, argv);
        if (ret < 0)
                rte_exit(EXIT_FAILURE, "Invalid quota/watermark argument(s)\n");

        /* Create a pool of mbuf to store packets */
        mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
                        MBUF_DATA_SIZE, rte_socket_id());
        if (mbuf_pool == NULL)
                rte_panic("%s\n", rte_strerror(rte_errno));

        /* Configure each enabled port and create the master core's rings */
        for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                if (is_bit_set(port_id, portmask)) {
                        configure_eth_port(port_id);
                        init_ring(master_lcore_id, port_id);
                }

        pair_ports();

        /*
         * Start pipeline_connect() on all the available slave lcores
         * but the last
         */
        for (lcore_id = 0 ; lcore_id < last_lcore_id; lcore_id++) {
                if (rte_lcore_is_enabled(lcore_id) &&
                                lcore_id != master_lcore_id) {

                        for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
                                if (is_bit_set(port_id, portmask))
                                        init_ring(lcore_id, port_id);

                        rte_eal_remote_launch(pipeline_stage,
                                        NULL, lcore_id);
                }
        }

        /* Start send_stage() on the last slave core */
        rte_eal_remote_launch(send_stage, NULL, last_lcore_id);

        /* Start receive_stage() on the master core; never returns */
        receive_stage(NULL);

        return 0;
}