1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
10 #include <rte_debug.h>
11 #include <rte_ethdev.h>
12 #include <rte_mempool.h>
13 #include <rte_sched.h>
14 #include <rte_cycles.h>
15 #include <rte_string_fns.h>
16 #include <rte_cfgfile.h>
/* ---- Global application state and default tuning parameters ---- */

/* NUMA mask selected on the command line (0 = none given). */
21 uint32_t app_numa_mask = 0;
/* Bitmask of ports already brought up; lets several flows share one port
 * (tested/updated in app_init_port()). */
22 static uint32_t app_inited_port_mask = 0;

/* Pipe -> profile index map, filled from the config file; -1 marks an
 * unconfigured pipe (see the pipe loop in app_init_sched_port()). */
24 int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES];

26 #define MAX_NAME_LEN 32

/* NIC descriptor-ring and software-ring sizes (may be adjusted by the
 * driver in app_init_port()). */
28 struct ring_conf ring_conf = {
29 .rx_size = APP_RX_DESC_DEFAULT,
30 .ring_size = APP_RING_SIZE,
31 .tx_size = APP_TX_DESC_DEFAULT,

/* Burst sizes used by the RX, worker/QoS and TX stages. */
34 struct burst_conf burst_conf = {
35 .rx_burst = MAX_PKT_RX_BURST,
36 .ring_burst = PKT_ENQUEUE,
37 .qos_dequeue = PKT_DEQUEUE,
38 .tx_burst = MAX_PKT_TX_BURST,

/* Default RX descriptor-ring thresholds, copied into rx_conf. */
41 struct ring_thresh rx_thresh = {
42 .pthresh = RX_PTHRESH,
43 .hthresh = RX_HTHRESH,
44 .wthresh = RX_WTHRESH,

/* Default TX descriptor-ring thresholds, copied into tx_conf. */
47 struct ring_thresh tx_thresh = {
48 .pthresh = TX_PTHRESH,
49 .hthresh = TX_HTHRESH,
50 .wthresh = TX_WTHRESH,

/* Scheduler profile path; NULL = use the built-in defaults below. */
54 const char *cfg_profile = NULL;
/* Number of mbufs per per-flow pool. */
55 int mp_size = NB_MBUF;
/* One flow (RX port/core -> QoS -> TX port/core) per data stream. */
56 struct flow_conf qos_conf[MAX_DATA_STREAMS];

/* Base port configuration; copied to a local in app_init_port() so
 * per-port offload tweaks do not leak back into this template. */
58 static struct rte_eth_conf port_conf = {
63 .mq_mode = RTE_ETH_MQ_TX_NONE,
/*
 * app_init_port() - configure and start one Ethernet port with a single
 * RX queue (fed from @mp) and a single TX queue, print link status and
 * enable promiscuous mode.  A port shared by several flows is only
 * initialized once: app_inited_port_mask records ports already brought
 * up.  Every failure is fatal (rte_exit()).
 */
68 app_init_port(uint16_t portid, struct rte_mempool *mp)
71 struct rte_eth_link link;
72 struct rte_eth_dev_info dev_info;
73 struct rte_eth_rxconf rx_conf;
74 struct rte_eth_txconf tx_conf;
/* Local copy of the global template (see port_conf). */
77 struct rte_eth_conf local_port_conf = port_conf;
78 char link_status_text[RTE_ETH_LINK_MAX_STR_LEN];
80 /* check if port already initialized (multistream configuration) */
81 if (app_inited_port_mask & (1u << portid))
/* RX queue setup: thresholds come from the global rx_thresh table. */
84 rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
85 rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
86 rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
87 rx_conf.rx_free_thresh = 32;
88 rx_conf.rx_drop_en = 0;
89 rx_conf.rx_deferred_start = 0;
/* TX queue setup: zero free/RS thresholds let the PMD pick defaults. */
91 tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
92 tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
93 tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
94 tx_conf.tx_free_thresh = 0;
95 tx_conf.tx_rs_thresh = 0;
96 tx_conf.tx_deferred_start = 0;
99 RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid);
102 ret = rte_eth_dev_info_get(portid, &dev_info);
104 rte_exit(EXIT_FAILURE,
105 "Error during getting device (port %u) info: %s\n",
106 portid, strerror(-ret));
/* Enable fast mbuf free only when the device advertises it. */
108 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
109 local_port_conf.txmode.offloads |=
110 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
111 ret = rte_eth_dev_configure(portid, 1, 1, &local_port_conf);
113 rte_exit(EXIT_FAILURE,
114 "Cannot configure device: err=%d, port=%u\n",
/* Let the driver clamp the requested descriptor counts and write the
 * adjusted values back so later log output reports what is in use. */
117 rx_size = ring_conf.rx_size;
118 tx_size = ring_conf.tx_size;
119 ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size);
121 rte_exit(EXIT_FAILURE,
122 "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d,port=%u\n",
124 ring_conf.rx_size = rx_size;
125 ring_conf.tx_size = tx_size;
127 /* init one RX queue */
129 rx_conf.offloads = local_port_conf.rxmode.offloads;
130 ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
131 rte_eth_dev_socket_id(portid), &rx_conf, mp);
133 rte_exit(EXIT_FAILURE,
/* Fixed copy-paste bug: this is the RX queue error path, but the
 * message used to blame rte_eth_tx_queue_setup. */
134 "rte_eth_rx_queue_setup: err=%d, port=%u\n",
137 /* init one TX queue */
139 tx_conf.offloads = local_port_conf.txmode.offloads;
140 ret = rte_eth_tx_queue_setup(portid, 0,
141 (uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf);
143 rte_exit(EXIT_FAILURE,
144 "rte_eth_tx_queue_setup: err=%d, port=%u queue=%d\n",
148 ret = rte_eth_dev_start(portid);
150 rte_exit(EXIT_FAILURE,
/* NOTE(review): message names "rte_pmd_port_start" but the failing call
 * is rte_eth_dev_start() - consider renaming for grep-ability. */
151 "rte_pmd_port_start: err=%d, port=%u\n",
156 /* get link status */
157 ret = rte_eth_link_get(portid, &link);
159 rte_exit(EXIT_FAILURE,
160 "rte_eth_link_get: err=%d, port=%u: %s\n",
161 ret, portid, rte_strerror(-ret));
163 rte_eth_link_to_str(link_status_text, sizeof(link_status_text), &link);
164 printf("%s\n", link_status_text);
166 ret = rte_eth_promiscuous_enable(portid);
168 rte_exit(EXIT_FAILURE,
169 "rte_eth_promiscuous_enable: err=%s, port=%u\n",
170 rte_strerror(-ret), portid);
172 /* mark port as initialized */
173 app_inited_port_mask |= 1u << portid;
/* Default pipe profile.  NOTE(review): rates appear to be in bytes/s
 * (305175 B/s ~= 2.44 Mbit/s per traffic class) - confirm against the
 * rte_sched documentation; several initializer lines are elided here. */
178 static struct rte_sched_pipe_params pipe_profiles[MAX_SCHED_PIPE_PROFILES] = {
183 .tc_rate = {305175, 305175, 305175, 305175, 305175, 305175,
184 305175, 305175, 305175, 305175, 305175, 305175, 305175},
186 #ifdef RTE_SCHED_SUBPORT_TC_OV
/* Equal WRR weights for the 4 queues of the best-effort traffic class. */
190 .wrr_weights = {1, 1, 1, 1},
/* Default subport profile: 1.25e9 B/s (= 10 Gbit/s) token-bucket rate
 * and the same ceiling for each of the 13 traffic classes. */
194 static struct rte_sched_subport_profile_params
195 subport_profile[MAX_SCHED_SUBPORT_PROFILES] = {
197 .tb_rate = 1250000000,
199 .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000,
200 1250000000, 1250000000, 1250000000, 1250000000, 1250000000,
201 1250000000, 1250000000, 1250000000, 1250000000},
206 #ifdef RTE_SCHED_CMAN
/* Congestion-management (RED) parameters: for each of the 13 traffic
 * classes, one {min_th, max_th, maxp_inv, wq_log2} tuple per packet
 * color (Green/Yellow/Red).  Only min_th differs by color - earlier
 * drop onset for yellow (40) and red (32) than for green (48). */
207 struct rte_sched_cman_params cman_params = {
208 .cman_mode = RTE_SCHED_CMAN_RED,
210 /* Traffic Class 0 Colors Green / Yellow / Red */
211 [0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
212 [0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
213 [0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
215 /* Traffic Class 1 - Colors Green / Yellow / Red */
216 [1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
217 [1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
218 [1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
220 /* Traffic Class 2 - Colors Green / Yellow / Red */
221 [2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
222 [2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
223 [2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
225 /* Traffic Class 3 - Colors Green / Yellow / Red */
226 [3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
227 [3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
228 [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
230 /* Traffic Class 4 - Colors Green / Yellow / Red */
231 [4][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
232 [4][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
233 [4][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
235 /* Traffic Class 5 - Colors Green / Yellow / Red */
236 [5][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
237 [5][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
238 [5][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
240 /* Traffic Class 6 - Colors Green / Yellow / Red */
241 [6][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
242 [6][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
243 [6][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
245 /* Traffic Class 7 - Colors Green / Yellow / Red */
246 [7][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
247 [7][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
248 [7][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
250 /* Traffic Class 8 - Colors Green / Yellow / Red */
251 [8][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
252 [8][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
253 [8][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
255 /* Traffic Class 9 - Colors Green / Yellow / Red */
256 [9][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
257 [9][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
258 [9][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
260 /* Traffic Class 10 - Colors Green / Yellow / Red */
261 [10][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
262 [10][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
263 [10][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
265 /* Traffic Class 11 - Colors Green / Yellow / Red */
266 [11][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
267 [11][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
268 [11][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
270 /* Traffic Class 12 - Colors Green / Yellow / Red */
271 [12][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
272 [12][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
273 [12][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
276 #endif /* RTE_SCHED_CMAN */
/* Default subport setup: 4096 enabled pipes, 64-entry queues for all 13
 * traffic classes, pipe profile table from pipe_profiles above, and the
 * RED parameters when congestion management is compiled in. */
278 struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
280 .n_pipes_per_subport_enabled = 4096,
281 .qsize = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64},
282 .pipe_profiles = pipe_profiles,
/* Element count of pipe_profiles (array/element-size idiom). */
283 .n_pipe_profiles = sizeof(pipe_profiles) /
284 sizeof(struct rte_sched_pipe_params),
285 .n_max_pipe_profiles = MAX_SCHED_PIPE_PROFILES,
286 #ifdef RTE_SCHED_CMAN
287 .cman_params = &cman_params,
288 #endif /* RTE_SCHED_CMAN */
/* Default scheduler port parameters.  name/socket/rate are placeholders
 * overwritten at runtime by app_init_sched_port(). */
292 struct rte_sched_port_params port_params = {
293 .name = "port_scheduler_0",
294 .socket = 0, /* computed */
295 .rate = 0, /* computed */
/* Ethernet MTU: dst MAC + src MAC + 2x VLAN tag + ethertype + payload. */
296 .mtu = 6 + 6 + 4 + 4 + 2 + 1500,
297 .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
298 .n_subports_per_port = 1,
299 .n_subport_profiles = 1,
300 .subport_profiles = subport_profile,
301 .n_max_subport_profiles = MAX_SCHED_SUBPORT_PROFILES,
302 .n_pipes_per_subport = MAX_SCHED_PIPES,
/*
 * app_init_sched_port() - create the hierarchical scheduler for one
 * port: derive the scheduler rate from the port's link speed, configure
 * each subport, then configure every pipe that was mapped to a profile
 * in app_pipe_to_profile (entries left at -1 are skipped).
 * Returns the configured rte_sched_port; all failures are fatal.
 */
305 static struct rte_sched_port *
306 app_init_sched_port(uint32_t portid, uint32_t socketid)
308 static char port_name[32]; /* static as referenced from global port_params*/
309 struct rte_eth_link link;
310 struct rte_sched_port *port = NULL;
311 uint32_t pipe, subport;
314 err = rte_eth_link_get(portid, &link);
316 rte_exit(EXIT_FAILURE,
317 "rte_eth_link_get: err=%d, port=%u: %s\n",
318 err, portid, rte_strerror(-err));
320 port_params.socket = socketid;
/* link_speed is in Mbit/s; convert to bytes per second. */
321 port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
/* NOTE(review): portid is uint32_t but formatted with %d - harmless for
 * real port ids, but %u would match the type. */
322 snprintf(port_name, sizeof(port_name), "port_%d", portid);
323 port_params.name = port_name;
325 port = rte_sched_port_config(&port_params);
327 rte_exit(EXIT_FAILURE, "Unable to config sched port\n");
330 for (subport = 0; subport < port_params.n_subports_per_port; subport ++) {
331 err = rte_sched_subport_config(port, subport,
332 &subport_params[subport],
335 rte_exit(EXIT_FAILURE, "Unable to config sched "
336 "subport %u, err=%d\n", subport, err);
339 uint32_t n_pipes_per_subport =
340 subport_params[subport].n_pipes_per_subport_enabled;
/* Only pipes explicitly mapped to a profile are configured. */
342 for (pipe = 0; pipe < n_pipes_per_subport; pipe++) {
343 if (app_pipe_to_profile[subport][pipe] != -1) {
344 err = rte_sched_pipe_config(port, subport, pipe,
345 app_pipe_to_profile[subport][pipe]);
347 rte_exit(EXIT_FAILURE, "Unable to config sched pipe %u "
348 "for profile %d, err=%d\n", pipe,
349 app_pipe_to_profile[subport][pipe], err);
/*
 * app_load_cfg_profile() - load the scheduler profile file @profile and
 * overwrite the default port/subport/subport-profile/pipe tables above.
 * A load failure is fatal (rte_exit); the cfgfile handle is closed on
 * the success path.
 */
359 app_load_cfg_profile(const char *profile)
363 struct rte_cfgfile *file = rte_cfgfile_load(profile, 0);
365 rte_exit(EXIT_FAILURE, "Cannot load configuration profile %s\n", profile);
/* Each cfg_load_* helper fills one of the global parameter tables. */
367 cfg_load_port(file, &port_params);
368 cfg_load_subport(file, subport_params);
369 cfg_load_subport_profile(file, subport_profile);
370 cfg_load_pipe(file, pipe_profiles);
372 rte_cfgfile_close(file);
/* Body of app_init(): load the profile, then for every active flow
 * create (or reuse) the RX/SW/TX rings and mbuf pool, initialize the RX
 * and TX ports, and build the scheduler; finally log the effective
 * configuration. */
380 char ring_name[MAX_NAME_LEN];
381 char pool_name[MAX_NAME_LEN];
383 if (rte_eth_dev_count_avail() == 0)
384 rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");
386 /* load configuration profile */
387 if (app_load_cfg_profile(cfg_profile) != 0)
388 rte_exit(EXIT_FAILURE, "Invalid configuration profile\n");
390 /* Initialize each active flow */
391 for(i = 0; i < nb_pfc; i++) {
392 uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
393 struct rte_ring *ring;
/* RX ring: reuse an existing ring of the same name (flows sharing an
 * RX core), otherwise create a single-producer/single-consumer one. */
395 snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
396 ring = rte_ring_lookup(ring_name);
398 qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
399 socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
401 qos_conf[i].rx_ring = ring;
/* TX ring: same lookup-or-create scheme, keyed by the TX core. */
403 snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
404 ring = rte_ring_lookup(ring_name);
406 qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
407 socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
409 qos_conf[i].tx_ring = ring;
412 /* create the mbuf pools for each RX Port */
413 snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
/* Cache size is 4x the RX burst; pool lives on the RX port's socket. */
414 qos_conf[i].mbuf_pool = rte_pktmbuf_pool_create(pool_name,
415 mp_size, burst_conf.rx_burst * 4, 0,
416 RTE_MBUF_DEFAULT_BUF_SIZE,
417 rte_eth_dev_socket_id(qos_conf[i].rx_port));
418 if (qos_conf[i].mbuf_pool == NULL)
/* NOTE(review): message says "socket %u" but the argument is the flow
 * index i, not a socket id - misleading on failure. */
419 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for socket %u\n", i);
/* RX and TX ports share the flow's mbuf pool; app_init_port() is
 * idempotent for ports used by several flows. */
421 app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
422 app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);
/* The scheduler hangs off the TX port and its link speed. */
424 qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket);
427 RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n",
430 RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d SW queue = %u,"
431 "NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size,
434 RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n"
435 "             Worker read/QoS enqueue = %hu,\n"
436 "             QoS dequeue = %hu, Worker write = %hu\n",
437 burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst,
438 burst_conf.qos_dequeue, burst_conf.tx_burst);
440 RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu),"
441 "TX (p = %hhu, h = %hhu, w = %hhu)\n",
442 rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
443 tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);