/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>

#include <rte_log.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_sched.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>

#include "main.h"      /* application-wide types and constants (flow_conf, burst_conf, ...) */
#include "cfg_file.h"  /* configuration profile loader (cfg_load, cfg_load_port, ...) */
uint32_t app_numa_mask = 0;
static uint32_t app_inited_port_mask = 0;

int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES];

#define MAX_NAME_LEN 32
struct ring_conf ring_conf = {
	.rx_size = APP_RX_DESC_DEFAULT,
	.ring_size = APP_RING_SIZE,
	.tx_size = APP_TX_DESC_DEFAULT,
};

struct burst_conf burst_conf = {
	.rx_burst = MAX_PKT_RX_BURST,
	.ring_burst = PKT_ENQUEUE,
	.qos_dequeue = PKT_DEQUEUE,
	.tx_burst = MAX_PKT_TX_BURST,
};

struct ring_thresh rx_thresh = {
	.pthresh = RX_PTHRESH,
	.hthresh = RX_HTHRESH,
	.wthresh = RX_WTHRESH,
};

struct ring_thresh tx_thresh = {
	.pthresh = TX_PTHRESH,
	.hthresh = TX_HTHRESH,
	.wthresh = TX_WTHRESH,
};
uint32_t nb_pfc;
const char *cfg_profile = NULL;
int mp_size = NB_MBUF;
struct flow_conf qos_conf[MAX_DATA_STREAMS];
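
/*
 * Default Ethernet device configuration: standard-size frames on RX, all
 * hardware offloads (header split, IP checksum, VLAN filtering, jumbo frames,
 * CRC stripping) disabled, and no multi-queue/DCB mode on TX.
 */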
static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split = 0,   /**< Header Split disabled */
		.hw_ip_checksum = 0, /**< IP checksum offload disabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame = 0,    /**< Jumbo Frame Support disabled */
		.hw_strip_crc = 0,   /**< CRC stripping by hardware disabled */
	},
	.txmode = {
		.mq_mode = ETH_DCB_NONE,
	},
};
static int
app_init_port(uint8_t portid, struct rte_mempool *mp)
{
	int ret;
	struct rte_eth_link link;
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_txconf tx_conf;

	/* check if port already initialized (multistream configuration) */
	if (app_inited_port_mask & (1u << portid))
		return 0;
	rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
	rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
	rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
	rx_conf.rx_free_thresh = 32;
	rx_conf.rx_drop_en = 0;

	tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
	tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
	tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
	tx_conf.tx_free_thresh = 0;
	tx_conf.tx_rs_thresh = 0;
	tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;
	/* init port */
	RTE_LOG(INFO, APP, "Initializing port %"PRIu8"... ", portid);
	fflush(stdout);
	ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Cannot configure device: "
				"err=%d, port=%"PRIu8"\n", ret, portid);
	/* init one RX queue */
	ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
		rte_eth_dev_socket_id(portid), &rx_conf, mp);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: "
				"err=%d, port=%"PRIu8"\n", ret, portid);
	/* init one TX queue */
	ret = rte_eth_tx_queue_setup(portid, 0,
		(uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
				"port=%"PRIu8" queue=%d\n", ret, portid, 0);
	/* start device */
	ret = rte_eth_dev_start(portid);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "rte_eth_dev_start: "
				"err=%d, port=%"PRIu8"\n", ret, portid);
	/* get link status */
	rte_eth_link_get(portid, &link);
	if (link.link_status) {
		printf(" Link Up - speed %u Mbps - %s\n",
			(uint32_t) link.link_speed,
			(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
			("full-duplex") : ("half-duplex"));
	} else {
		printf(" Link Down\n");
	}

	rte_eth_promiscuous_enable(portid);
	/* mark port as initialized */
	app_inited_port_mask |= 1u << portid;

	return 0;
}
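
/*
 * Default hierarchical scheduler configuration: one subport per port and 4096
 * pipes per subport, all sharing a single pipe profile. The subport rate of
 * 1250000000 bytes/s corresponds to a 10 GbE link, and the pipe rate of
 * 305175 bytes/s is that rate divided evenly across the 4096 pipes. These
 * defaults may be overridden by an optional profile file, see
 * app_load_cfg_profile().
 */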
static struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
	{
		.tb_rate = 1250000000,
		.tb_size = 1000000,
		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
		.tc_period = 10,
	},
};
static struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT] = {
	{ /* Profile #0 */
		.tb_rate = 305175,
		.tb_size = 1000000,
		.tc_rate = {305175, 305175, 305175, 305175},
		.tc_period = 40,
#ifdef RTE_SCHED_SUBPORT_TC_OV
		.tc_ov_weight = 1,
#endif
		.wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
	},
};
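
/*
 * Port-level scheduler parameters. socket and rate are placeholders filled in
 * at run time by app_init_sched_port(). The mtu of 6 + 6 + 4 + 4 + 2 + 1500
 * bytes covers the Ethernet header (destination MAC, source MAC, two VLAN
 * tags, EtherType) plus a 1500-byte payload.
 */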
struct rte_sched_port_params port_params = {
	.name = "port_scheduler_0",
	.socket = 0, /* computed */
	.rate = 0, /* computed */
	.mtu = 6 + 6 + 4 + 4 + 2 + 1500,
	.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
	.n_subports_per_port = 1,
	.n_pipes_per_subport = 4096,
	.qsize = {64, 64, 64, 64},
	.pipe_profiles = pipe_profiles,
	.n_pipe_profiles = sizeof(pipe_profiles) / sizeof(struct rte_sched_pipe_params),

#ifdef RTE_SCHED_RED
	.red_params = {
		/* Traffic Class 0 - Colors Green / Yellow / Red */
		[0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		/* Traffic Class 1 - Colors Green / Yellow / Red */
		[1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 2 - Colors Green / Yellow / Red */
		[2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},

		/* Traffic Class 3 - Colors Green / Yellow / Red */
		[3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
		[3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}
	},
#endif /* RTE_SCHED_RED */
};
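
/*
 * Configure the hierarchical scheduler for one output port: derive the port
 * rate from the negotiated link speed (Mbps converted to bytes/s), then
 * configure every subport and every pipe that has a profile mapping.
 */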
static struct rte_sched_port *
app_init_sched_port(uint32_t portid, uint32_t socketid)
{
	static char port_name[32]; /* static as referenced from global port_params */
	struct rte_eth_link link;
	struct rte_sched_port *port = NULL;
	uint32_t pipe, subport;
	int err;

	rte_eth_link_get((uint8_t)portid, &link);
	port_params.socket = socketid;
	port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
	snprintf(port_name, sizeof(port_name), "port_%u", portid);
	port_params.name = port_name;

	port = rte_sched_port_config(&port_params);
	if (port == NULL)
		rte_exit(EXIT_FAILURE, "Unable to config sched port\n");
	for (subport = 0; subport < port_params.n_subports_per_port; subport++) {
		err = rte_sched_subport_config(port, subport, &subport_params[subport]);
		if (err)
			rte_exit(EXIT_FAILURE, "Unable to config sched subport %u, err=%d\n",
					subport, err);
		for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe++) {
			if (app_pipe_to_profile[subport][pipe] != -1) {
				err = rte_sched_pipe_config(port, subport, pipe,
						app_pipe_to_profile[subport][pipe]);
				if (err)
					rte_exit(EXIT_FAILURE, "Unable to config sched pipe %u "
							"for profile %d, err=%d\n", pipe,
							app_pipe_to_profile[subport][pipe], err);
			}
		}
	}

	return port;
}
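
/*
 * Illustrative sketch only (not part of the original application): once
 * app_init_sched_port() has returned, a QoS worker passes bursts of
 * already-classified mbufs through the scheduler roughly as shown below.
 * The helper name app_sched_pass_sketch and its parameters are placeholders
 * chosen for this example.
 */
static inline void
app_sched_pass_sketch(struct rte_sched_port *sched,
		struct rte_mbuf **rx_pkts, uint32_t n_rx,
		struct rte_mbuf **tx_pkts, uint32_t n_tx_max)
{
	/* hand a burst of classified packets to the scheduler... */
	rte_sched_port_enqueue(sched, rx_pkts, n_rx);

	/* ...and pull out whatever the scheduler decides should go out next;
	 * a real caller would forward these with rte_eth_tx_burst() */
	int n_tx = rte_sched_port_dequeue(sched, tx_pkts, n_tx_max);
	(void)n_tx;
}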
static int
app_load_cfg_profile(const char *profile)
{
	if (profile == NULL)
		return 0;

	struct cfg_file *cfg_file = cfg_load(profile, 0);
	if (cfg_file == NULL)
		rte_exit(EXIT_FAILURE, "Cannot load configuration profile %s\n", profile);

	cfg_load_port(cfg_file, &port_params);
	cfg_load_subport(cfg_file, subport_params);
	cfg_load_pipe(cfg_file, pipe_profiles);

	return 0;
}
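
/*
 * Global application initialization: for every active flow, create (or look
 * up) the software rings between the RX, worker and TX cores, create the
 * per-flow mbuf pool, bring up the RX and TX ports and attach a scheduler
 * instance to the TX port.
 */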
int app_init(void)
{
	uint32_t i;
	char ring_name[MAX_NAME_LEN];
	char pool_name[MAX_NAME_LEN];

	if (rte_eth_dev_count() == 0)
		rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n");

	/* load configuration profile */
	if (app_load_cfg_profile(cfg_profile) != 0)
		rte_exit(EXIT_FAILURE, "Invalid configuration profile\n");
	/* Initialize each active flow */
	for (i = 0; i < nb_pfc; i++) {
		uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
		struct rte_ring *ring;

		snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
					socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].rx_ring = ring;
		snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
		ring = rte_ring_lookup(ring_name);
		if (ring == NULL)
			qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
					socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
		else
			qos_conf[i].tx_ring = ring;
		/* create the mbuf pool for each RX port */
		snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
		qos_conf[i].mbuf_pool = rte_mempool_create(pool_name, mp_size, MBUF_SIZE,
					burst_conf.rx_burst * 4,
					sizeof(struct rte_pktmbuf_pool_private),
					rte_pktmbuf_pool_init, NULL,
					rte_pktmbuf_init, NULL,
					rte_eth_dev_socket_id(qos_conf[i].rx_port),
					0);
		if (qos_conf[i].mbuf_pool == NULL)
			rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for flow %u\n", i);
		app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
		app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);

		qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].tx_port, socket);
	}
	RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n",
			rte_get_timer_hz());
	RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d, SW queue = %u, "
			"NIC TX = %u\n", ring_conf.rx_size, mp_size, ring_conf.ring_size,
			ring_conf.tx_size);
	RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n"
			"             Worker read/QoS enqueue = %hu,\n"
			"             QoS dequeue = %hu, Worker write = %hu\n",
			burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst,
			burst_conf.qos_dequeue, burst_conf.tx_burst);
	RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu), "
			"TX (p = %hhu, h = %hhu, w = %hhu)\n",
			rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
			tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);

	return 0;
}