4 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 #include <rte_debug.h>
41 #include <rte_ethdev.h>
42 #include <rte_mempool.h>
43 #include <rte_sched.h>
44 #include <rte_cycles.h>
45 #include <rte_string_fns.h>
/* NUMA socket mask for the application; presumably set from the command
 * line — TODO confirm against the args-parsing file. */
50 uint32_t app_numa_mask = 0;
/* Bitmask of ports already brought up; checked in app_init_port() so a
 * port shared by several flows (multistream) is only initialized once. */
51 static uint32_t app_inited_port_mask = 0;
/* Scheduler profile index per (subport, pipe); a value of -1 marks an
 * unconfigured pipe (see the check in app_init_sched_port()). */
53 int app_pipe_to_profile[MAX_SCHED_SUBPORTS][MAX_SCHED_PIPES];
/* Buffer length for ring/mempool name strings built in app_init(). */
55 #define MAX_NAME_LEN 32
/* Sizes of the NIC descriptor rings and the software rings between
 * cores; defaults come from the application's header. The closing "};"
 * of this initializer is not visible in this extract. */
57 struct ring_conf ring_conf = {
58 .rx_size = APP_RX_DESC_DEFAULT,
59 .ring_size = APP_RING_SIZE,
60 .tx_size = APP_TX_DESC_DEFAULT,
/* Burst sizes used at each stage of the pipeline: NIC RX read, SW-ring
 * enqueue, QoS dequeue, NIC TX write (logged at the end of app_init()). */
63 struct burst_conf burst_conf = {
64 .rx_burst = MAX_PKT_RX_BURST,
65 .ring_burst = PKT_ENQUEUE,
66 .qos_dequeue = PKT_DEQUEUE,
67 .tx_burst = MAX_PKT_TX_BURST,
/* RX descriptor-ring prefetch/host/write-back thresholds, copied into
 * rte_eth_rxconf in app_init_port(). */
70 struct ring_thresh rx_thresh = {
71 .pthresh = RX_PTHRESH,
72 .hthresh = RX_HTHRESH,
73 .wthresh = RX_WTHRESH,
/* TX descriptor-ring thresholds, copied into rte_eth_txconf in
 * app_init_port(). */
76 struct ring_thresh tx_thresh = {
77 .pthresh = TX_PTHRESH,
78 .hthresh = TX_HTHRESH,
79 .wthresh = TX_WTHRESH,
/* Path of the scheduler configuration profile; NULL means built-in
 * defaults (app_load_cfg_profile() is a no-op path in that case —
 * presumably guarded by a missing NULL check; verify against caller). */
83 const char *cfg_profile = NULL;
/* Per-flow configuration (ports, cores, rings, mempool, sched port);
 * filled in by app_init() for the first nb_pfc entries. */
84 struct flow_conf qos_conf[MAX_DATA_STREAMS];
/* Default Ethernet device configuration: all RX offloads disabled,
 * standard max frame length, no DCB/RSS multi-queue on TX. The nested
 * ".rxmode = {" / ".txmode = {" braces are not visible in this extract. */
86 static const struct rte_eth_conf port_conf = {
88 .max_rx_pkt_len = ETHER_MAX_LEN,
90 .header_split = 0, /**< Header Split disabled */
91 .hw_ip_checksum = 0, /**< IP checksum offload disabled */
92 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
93 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
94 .hw_strip_crc = 0, /**< CRC stripped by hardware */
97 .mq_mode = ETH_DCB_NONE,
/*
 * Configure and start one Ethernet port: one RX queue fed from mempool
 * `mp`, one TX queue, then start the device, print link status and
 * enable promiscuous mode. Safe to call more than once per port (guarded
 * by app_inited_port_mask). NOTE(review): the return-type line, braces,
 * `int ret;` and the `if (ret < 0)` guards are missing from this extract.
 */
102 app_init_port(uint8_t portid, struct rte_mempool *mp)
105 struct rte_eth_link link;
106 struct rte_eth_rxconf rx_conf;
107 struct rte_eth_txconf tx_conf;
109 /* check if port already initialized (multistream configuration) */
110 if (app_inited_port_mask & (1u << portid))
/* Copy the global RX thresholds into this queue's config. */
113 rx_conf.rx_thresh.pthresh = rx_thresh.pthresh;
114 rx_conf.rx_thresh.hthresh = rx_thresh.hthresh;
115 rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
116 rx_conf.rx_free_thresh = 32;
117 rx_conf.rx_drop_en = 0;
/* Copy the global TX thresholds; 0 free/rs thresholds let the PMD pick
 * its own defaults. */
119 tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
120 tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
121 tx_conf.tx_thresh.wthresh = tx_thresh.wthresh;
122 tx_conf.tx_free_thresh = 0;
123 tx_conf.tx_rs_thresh = 0;
/* Single-segment, no-offload TX path enables the PMD's fast transmit. */
124 tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;
/* NOTE(review): portid is uint8_t (promoted to int in varargs) but the
 * format uses %hu — harmless on common ABIs, %hhu/%u would be exact. */
127 RTE_LOG(INFO, APP, "Initializing port %hu... ", portid);
/* 1 RX queue + 1 TX queue per port. */
129 ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
131 rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%hu\n",
134 /* init one RX queue */
136 ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
137 rte_eth_dev_socket_id(portid), &rx_conf, mp);
/* BUG(review): this is the RX queue setup failure, but the message says
 * "rte_eth_tx_queue_setup" — copy/paste error; should read rx_queue_setup. */
139 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%hu\n",
142 /* init one TX queue */
144 ret = rte_eth_tx_queue_setup(portid, 0,
145 (uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf);
147 rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
148 "port=%hu queue=%d\n",
152 ret = rte_eth_dev_start(portid);
154 rte_exit(EXIT_FAILURE, "rte_pmd_port_start: err=%d, port=%hu\n",
159 /* get link status */
160 rte_eth_link_get(portid, &link);
161 if (link.link_status) {
162 printf(" Link Up - speed %u Mbps - %s\n",
163 (uint32_t) link.link_speed,
164 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
/* BUG(review): stray "\n" inside "half-duplex" — the format string above
 * already ends with a newline, so half-duplex prints a blank line. */
165 ("full-duplex") : ("half-duplex\n"));
167 printf(" Link Down\n");
169 rte_eth_promiscuous_enable(portid);
171 /* mark port as initialized */
172 app_inited_port_mask |= 1u << portid;
/* Default subport profile: 10 GbE line rate (1.25 GB/s) token bucket and
 * the same rate for each of the 4 traffic classes. Overridden by
 * cfg_load_subport() when a profile file is given. NOTE(review): the
 * lines under the #ifdef and the closing braces are missing from this
 * extract. */
177 static struct rte_sched_subport_params subport_params[MAX_SCHED_SUBPORTS] = {
179 .tb_rate = 1250000000,
182 .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
184 #ifdef RTE_SCHED_SUBPORT_TC_OV
/* Default pipe profile: ~305 kB/s per traffic class (1.25 GB/s divided
 * across 4096 pipes), equal TC oversubscription weights and equal WRR
 * weights for the 16 queues (4 TCs x 4 queues). Overridden by
 * cfg_load_pipe() when a profile file is given. */
190 static struct rte_sched_pipe_params pipe_profiles[RTE_SCHED_PIPE_PROFILES_PER_PORT] = {
195 .tc_rate = {305175, 305175, 305175, 305175},
197 #ifdef RTE_SCHED_SUBPORT_TC_OV
198 .tc_ov_weight = {1, 1, 1, 1},
201 .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
/* Scheduler port parameters shared with the cfg loader (non-static on
 * purpose). .socket and .rate are filled in at runtime by
 * app_init_sched_port(); .name points at its static port_name buffer. */
205 struct rte_sched_port_params port_params = {
207 .socket = 0, /* computed */
208 .rate = 0, /* computed */
209 .frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
210 .n_subports_per_port = 1,
211 .n_pipes_per_subport = 4096,
212 .qsize = {64, 64, 64, 64},
213 .pipe_profiles = pipe_profiles,
214 .n_pipe_profiles = 1,
/* RED (Random Early Detection) thresholds per TC and drop-precedence
 * color: lower min_th for yellow/red makes higher precedence drop
 * earlier; maxp_inv = 10 is a 1/10 max drop probability, wq_log2 = 9 the
 * EWMA filter weight. All 4 TCs use identical settings. */
218 /* Traffic Class 0 Colors Green / Yellow / Red */
219 [0][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
220 [0][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
221 [0][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
223 /* Traffic Class 1 - Colors Green / Yellow / Red */
224 [1][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
225 [1][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
226 [1][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
228 /* Traffic Class 2 - Colors Green / Yellow / Red */
229 [2][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
230 [2][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
231 [2][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
233 /* Traffic Class 3 - Colors Green / Yellow / Red */
234 [3][0] = {.min_th = 48, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
235 [3][1] = {.min_th = 40, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9},
236 [3][2] = {.min_th = 32, .max_th = 64, .maxp_inv = 10, .wq_log2 = 9}
238 #endif /* RTE_SCHED_RED */
/*
 * Build the hierarchical scheduler for one port: derive the port rate
 * from the current link speed (Mbps -> bytes/s), configure the port,
 * every subport, and each pipe that has a profile assigned in
 * app_pipe_to_profile. Returns the configured rte_sched_port.
 * NOTE(review): braces, the `err` declaration, the NULL/err checks and
 * the `return port;` are missing from this extract.
 */
241 static struct rte_sched_port *
242 app_init_sched_port(uint32_t portid, uint32_t socketid)
244 static char port_name[32]; /* static as referenced from global port_params*/
245 struct rte_eth_link link;
246 struct rte_sched_port *port = NULL;
247 uint32_t pipe, subport;
250 rte_eth_link_get((uint8_t)portid, &link);
252 port_params.socket = socketid;
/* link_speed is in Mbps; x1e6 / 8 converts to bytes per second. */
253 port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
/* NOTE(review): portid is uint32_t but the format uses %d — %u would be
 * the exact specifier. */
254 rte_snprintf(port_name, sizeof(port_name), "port_%d", portid);
255 port_params.name = port_name;
257 port = rte_sched_port_config(&port_params);
259 rte_exit(EXIT_FAILURE, "Unable to config sched port\n");
262 for (subport = 0; subport < port_params.n_subports_per_port; subport ++) {
263 err = rte_sched_subport_config(port, subport, &subport_params[subport]);
265 rte_exit(EXIT_FAILURE, "Unable to config sched subport %u, err=%d\n",
/* Only pipes explicitly mapped to a profile (!= -1) are configured. */
269 for (pipe = 0; pipe < port_params.n_pipes_per_subport; pipe ++) {
270 if (app_pipe_to_profile[subport][pipe] != -1) {
271 err = rte_sched_pipe_config(port, subport, pipe,
272 app_pipe_to_profile[subport][pipe]);
274 rte_exit(EXIT_FAILURE, "Unable to config sched pipe %u "
275 "for profile %d, err=%d\n", pipe,
276 app_pipe_to_profile[subport][pipe], err);
/*
 * Load a scheduler configuration profile file and overwrite the default
 * port/subport/pipe parameter tables from it. Exits the process if the
 * file cannot be loaded. NOTE(review): the return-type line, braces and
 * any NULL-profile early return are missing from this extract; also
 * cfg_file is not visibly released here — confirm whether cfg_close()
 * (or equivalent) happens in the missing lines.
 */
286 app_load_cfg_profile(const char *profile)
291 struct cfg_file *cfg_file = cfg_load(profile, 0);
292 if (cfg_file == NULL)
293 rte_exit(EXIT_FAILURE, "Cannot load configuration profile %s\n", profile);
295 cfg_load_port(cfg_file, &port_params);
296 cfg_load_subport(cfg_file, subport_params);
297 cfg_load_pipe(cfg_file, pipe_profiles);
/* Top-level application init: probe devices, load the profile, then for
 * every active flow create (or look up) its RX/TX software rings, its
 * mbuf pool, initialize both NIC ports and build the QoS scheduler.
 * NOTE(review): the function signature, braces, the `i` declaration and
 * the ring==NULL / ring-reuse branches are missing from this extract. */
307 char ring_name[MAX_NAME_LEN];
308 char pool_name[MAX_NAME_LEN];
311 if (rte_pmd_init_all() < 0)
312 rte_exit(EXIT_FAILURE, "Cannot init PMD\n");
314 if (rte_eal_pci_probe() < 0)
315 rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
317 if (rte_eth_dev_count() == 0)
318 rte_exit(EXIT_FAILURE, "No Ethernet port - bye\n")
320 /* load configuration profile */
321 if (app_load_cfg_profile(cfg_profile) != 0)
322 rte_exit(EXIT_FAILURE, "Invalid configuration profile\n")
324 /* Initialize each active flow */
325 for(i = 0; i < nb_pfc; i++) {
/* Rings and the mempool are placed on the NUMA socket of the flow's
 * cores/port to avoid cross-socket traffic. */
326 uint32_t socket = rte_lcore_to_socket_id(qos_conf[i].rx_core);
327 struct rte_ring *ring;
/* Ring names encode flow index + core so flows sharing a core share the
 * ring (lookup hit) instead of creating a duplicate. */
329 rte_snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].rx_core);
330 ring = rte_ring_lookup(ring_name);
332 qos_conf[i].rx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
333 socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
335 qos_conf[i].rx_ring = ring;
337 rte_snprintf(ring_name, MAX_NAME_LEN, "ring-%u-%u", i, qos_conf[i].tx_core);
338 ring = rte_ring_lookup(ring_name);
340 qos_conf[i].tx_ring = rte_ring_create(ring_name, ring_conf.ring_size,
341 socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
343 qos_conf[i].tx_ring = ring;
346 /* create the mbuf pools for each RX Port */
347 rte_snprintf(pool_name, MAX_NAME_LEN, "mbuf_pool%u", i);
/* Per-core cache sized to 4 RX bursts. */
348 qos_conf[i].mbuf_pool = rte_mempool_create(pool_name, NB_MBUF, MBUF_SIZE,
349 burst_conf.rx_burst * 4,
350 sizeof(struct rte_pktmbuf_pool_private),
351 rte_pktmbuf_pool_init, NULL,
352 rte_pktmbuf_init, NULL,
353 rte_eth_dev_socket_id(qos_conf[i].rx_port),
355 if (qos_conf[i].mbuf_pool == NULL)
/* NOTE(review): message says "socket %u" but the argument is the flow
 * index i, not a socket id — misleading on failure. */
356 rte_exit(EXIT_FAILURE, "Cannot init mbuf pool for socket %u\n", i);
/* NOTE(review): dead commented-out debug print; candidate for removal. */
358 //printf("MP = %d\n", rte_mempool_count(qos_conf[i].app_pktmbuf_pool));
/* app_init_port() is idempotent, so rx_port == tx_port is harmless. */
360 app_init_port(qos_conf[i].rx_port, qos_conf[i].mbuf_pool);
361 app_init_port(qos_conf[i].tx_port, qos_conf[i].mbuf_pool);
363 qos_conf[i].sched_port = app_init_sched_port(qos_conf[i].rx_port, socket);
366 RTE_LOG(INFO, APP, "time stamp clock running at %" PRIu64 " Hz\n",
369 RTE_LOG(INFO, APP, "Ring sizes: NIC RX = %u, Mempool = %d SW queue = %u,"
370 "NIC TX = %u\n", ring_conf.rx_size, NB_MBUF, ring_conf.ring_size,
/* ring_burst appears twice on purpose: it is both the "RX write" and the
 * "Worker read/QoS enqueue" burst size. */
373 RTE_LOG(INFO, APP, "Burst sizes: RX read = %hu, RX write = %hu,\n"
374 " Worker read/QoS enqueue = %hu,\n"
375 " QoS dequeue = %hu, Worker write = %hu\n",
376 burst_conf.rx_burst, burst_conf.ring_burst, burst_conf.ring_burst,
377 burst_conf.qos_dequeue, burst_conf.tx_burst);
379 RTE_LOG(INFO, APP, "NIC thresholds RX (p = %hhu, h = %hhu, w = %hhu),"
380 "TX (p = %hhu, h = %hhu, w = %hhu)\n",
381 rx_thresh.pthresh, rx_thresh.hthresh, rx_thresh.wthresh,
382 tx_thresh.pthresh, tx_thresh.hthresh, tx_thresh.wthresh);