1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
8 #include <rte_common.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_ethdev.h>
13 #include <rte_cycles.h>
15 #include <rte_meter.h>
17 /* Traffic metering configuration. 8< */
18 #define APP_MODE_FWD 0
19 #define APP_MODE_SRTCM_COLOR_BLIND 1
20 #define APP_MODE_SRTCM_COLOR_AWARE 2
21 #define APP_MODE_TRTCM_COLOR_BLIND 3
22 #define APP_MODE_TRTCM_COLOR_AWARE 4
24 #define APP_MODE APP_MODE_SRTCM_COLOR_BLIND
25 /* >8 End of traffic metering configuration. */
31 #define APP_PKT_FLOW_POS 33
32 #define APP_PKT_COLOR_POS 5
35 #if APP_PKT_FLOW_POS > 64 || APP_PKT_COLOR_POS > 64
36 #error Byte offset needs to be less than 64
40 * Buffer pool configuration
44 #define MEMPOOL_CACHE_SIZE 256
46 static struct rte_mempool *pool = NULL;
52 static struct rte_eth_conf port_conf = {
54 .mq_mode = RTE_ETH_MQ_RX_RSS,
56 .offloads = RTE_ETH_RX_OFFLOAD_CHECKSUM,
61 .rss_hf = RTE_ETH_RSS_IP,
65 .mq_mode = RTE_ETH_MQ_TX_NONE,
69 #define NIC_RX_QUEUE_DESC 1024
70 #define NIC_TX_QUEUE_DESC 1024
72 #define NIC_RX_QUEUE 0
73 #define NIC_TX_QUEUE 0
79 #define RTE_MBUF_F_RX_BURST_MAX 32
80 #define RTE_MBUF_F_TX_BURST_MAX 32
81 #define TIME_TX_DRAIN 200000ULL
/* RX and TX port ids, filled in by parse_args() from the -p portmask. */
83 static uint16_t port_rx;
84 static uint16_t port_tx;
/* RX burst staging array and the buffered-TX handle used by main_loop(). */
85 static struct rte_mbuf *pkts_rx[RTE_MBUF_F_RX_BURST_MAX];
86 struct rte_eth_dev_tx_buffer *tx_buffer;
88 /* Traffic meter parameters are configured in the application. 8< */
/* srTCM (RFC 2697) parameters; initializer values are not visible in this
 * extract — presumably CIR/CBS/EBS constants, confirm against upstream. */
89 struct rte_meter_srtcm_params app_srtcm_params = {
/* Run-time srTCM profile built from app_srtcm_params at startup. */
95 struct rte_meter_srtcm_profile app_srtcm_profile;
/* trTCM (RFC 2698) parameters; initializer values likewise elided here. */
97 struct rte_meter_trtcm_params app_trtcm_params = {
103 /* >8 End of traffic meter parameters are configured in the application. */
105 struct rte_meter_trtcm_profile app_trtcm_profile;
/* Flow table size; must stay a power of two — app_pkt_handle() masks the
 * flow byte with (APP_FLOWS_MAX - 1). */
107 #define APP_FLOWS_MAX 256
/* Per-flow meter state; FLOW_METER is the srTCM/trTCM type selected by
 * APP_MODE (macro defined elsewhere, presumably in main.h). */
109 FLOW_METER app_flows[APP_FLOWS_MAX];
112 app_configure_flow_table(void)
117 ret = rte_meter_srtcm_profile_config(&app_srtcm_profile,
122 ret = rte_meter_trtcm_profile_config(&app_trtcm_profile,
127 for (i = 0; i < APP_FLOWS_MAX; i++) {
128 ret = FUNC_CONFIG(&app_flows[i], &PROFILE);
137 app_set_pkt_color(uint8_t *pkt_data, enum policer_action color)
139 pkt_data[APP_PKT_COLOR_POS] = (uint8_t)color;
143 app_pkt_handle(struct rte_mbuf *pkt, uint64_t time)
145 uint8_t input_color, output_color;
146 uint8_t *pkt_data = rte_pktmbuf_mtod(pkt, uint8_t *);
147 uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) -
148 sizeof(struct rte_ether_hdr);
149 uint8_t flow_id = (uint8_t)(pkt_data[APP_PKT_FLOW_POS] & (APP_FLOWS_MAX - 1));
150 input_color = pkt_data[APP_PKT_COLOR_POS];
151 enum policer_action action;
153 /* color input is not used for blind modes */
154 output_color = (uint8_t) FUNC_METER(&app_flows[flow_id],
158 (enum rte_color) input_color);
160 /* Apply policing and set the output color */
161 action = policer_table[input_color][output_color];
162 app_set_pkt_color(pkt_data, action);
168 static __rte_noreturn int
169 main_loop(__rte_unused void *dummy)
171 uint64_t current_time, last_time = rte_rdtsc();
172 uint32_t lcore_id = rte_lcore_id();
174 printf("Core %u: port RX = %d, port TX = %d\n", lcore_id, port_rx, port_tx);
180 /* Mechanism to avoid stale packets in the output buffer */
181 current_time = rte_rdtsc();
182 time_diff = current_time - last_time;
183 if (unlikely(time_diff > TIME_TX_DRAIN)) {
184 /* Flush tx buffer */
185 rte_eth_tx_buffer_flush(port_tx, NIC_TX_QUEUE, tx_buffer);
186 last_time = current_time;
189 /* Read packet burst from NIC RX */
190 nb_rx = rte_eth_rx_burst(port_rx, NIC_RX_QUEUE, pkts_rx, RTE_MBUF_F_RX_BURST_MAX);
193 for (i = 0; i < nb_rx; i ++) {
194 struct rte_mbuf *pkt = pkts_rx[i];
196 /* Handle current packet */
197 if (app_pkt_handle(pkt, current_time) == DROP)
198 rte_pktmbuf_free(pkt);
200 rte_eth_tx_buffer(port_tx, NIC_TX_QUEUE, tx_buffer, pkt);
/* Print command-line usage for the non-EAL part of the command line. */
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
		" -p PORTMASK: hexadecimal bitmask of ports to configure\n",
		prgname);
}
/*
 * Parse a hexadecimal port-mask string.
 *
 * Returns the mask value, or 0 when the string is empty or contains any
 * non-hexadecimal character; 0 doubles as the "invalid" marker, which is
 * acceptable because an empty mask is never a usable configuration.
 */
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0;

	return pm;
}
227 /* Parse the argument given in the command line of the application */
229 parse_args(int argc, char **argv)
234 char *prgname = argv[0];
235 static struct option lgopts[] = {
238 uint64_t port_mask, i, mask;
242 while ((opt = getopt_long(argc, argvopt, "p:", lgopts, &option_index)) != EOF) {
245 port_mask = parse_portmask(optarg);
246 if (port_mask == 0) {
247 printf("invalid port mask (null port mask)\n");
248 print_usage(prgname);
252 for (i = 0, mask = 1; i < 64; i ++, mask <<= 1){
253 if (mask & port_mask){
260 for (i = 0, mask = 1; i < 64; i ++, mask <<= 1){
261 if (mask & port_mask){
268 if (port_mask != 0) {
269 printf("invalid port mask (more than 2 ports)\n");
270 print_usage(prgname);
276 print_usage(prgname);
282 print_usage(prgname);
286 argv[optind-1] = prgname;
288 optind = 1; /* reset getopt lib */
/*
 * Application entry point: EAL init, argument parsing, mbuf pool setup,
 * per-port (RX and TX) device/queue configuration, buffered-TX setup,
 * device start, promiscuous mode, flow-table configuration, then launch
 * of main_loop on the single lcore.
 * NOTE(review): this extract is missing many intervening lines (braces,
 * declarations, error checks); comments below describe only what is
 * visible.
 */
293 main(int argc, char **argv)
296 uint16_t nb_rxd = NIC_RX_QUEUE_DESC;
297 uint16_t nb_txd = NIC_TX_QUEUE_DESC;
298 struct rte_eth_conf conf;
299 struct rte_eth_rxconf rxq_conf;
300 struct rte_eth_txconf txq_conf;
301 struct rte_eth_dev_info dev_info;
/* EAL init consumes the EAL portion of argv. */
305 ret = rte_eal_init(argc, argv);
307 rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
/* This example is strictly single-core. */
310 if (rte_lcore_count() != 1) {
311 rte_exit(EXIT_FAILURE, "This application does not accept more than one core. "
312 "Please adjust the \"-c COREMASK\" parameter accordingly.\n");
315 /* Application non-EAL arguments parse */
316 ret = parse_args(argc, argv);
318 rte_exit(EXIT_FAILURE, "Invalid input arguments\n");
320 /* Buffer pool init */
321 pool = rte_pktmbuf_pool_create("pool", NB_MBUF, MEMPOOL_CACHE_SIZE,
322 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
324 rte_exit(EXIT_FAILURE, "Buffer pool creation error\n");
/* --- RX port setup --- */
329 ret = rte_eth_dev_info_get(port_rx, &dev_info);
331 rte_exit(EXIT_FAILURE,
332 "Error during getting device (port %u) info: %s\n",
333 port_rx, strerror(-ret));
/* Enable fast mbuf free only when the PMD supports it. */
335 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
336 conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
/* Trim the requested RSS hash functions to what the NIC supports and
 * warn if anything was dropped. */
338 conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
339 if (conf.rx_adv_conf.rss_conf.rss_hf !=
340 port_conf.rx_adv_conf.rss_conf.rss_hf) {
341 printf("Port %u modified RSS hash function based on hardware support,"
342 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
344 port_conf.rx_adv_conf.rss_conf.rss_hf,
345 conf.rx_adv_conf.rss_conf.rss_hf);
348 ret = rte_eth_dev_configure(port_rx, 1, 1, &conf);
350 rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret);
352 ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_rx, &nb_rxd, &nb_txd);
354 rte_exit(EXIT_FAILURE, "Port %d adjust number of descriptors error (%d)\n",
357 rxq_conf = dev_info.default_rxconf;
358 rxq_conf.offloads = conf.rxmode.offloads;
359 ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, nb_rxd,
360 rte_eth_dev_socket_id(port_rx),
363 rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);
365 txq_conf = dev_info.default_txconf;
366 txq_conf.offloads = conf.txmode.offloads;
367 ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, nb_txd,
368 rte_eth_dev_socket_id(port_rx),
371 rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_rx, ret);
/* --- TX port setup (mirrors the RX port sequence above) --- */
375 ret = rte_eth_dev_info_get(port_tx, &dev_info);
377 rte_exit(EXIT_FAILURE,
378 "Error during getting device (port %u) info: %s\n",
379 port_tx, strerror(-ret));
381 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
382 conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
384 conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
385 if (conf.rx_adv_conf.rss_conf.rss_hf !=
386 port_conf.rx_adv_conf.rss_conf.rss_hf) {
387 printf("Port %u modified RSS hash function based on hardware support,"
388 "requested:%#"PRIx64" configured:%#"PRIx64"\n",
390 port_conf.rx_adv_conf.rss_conf.rss_hf,
391 conf.rx_adv_conf.rss_conf.rss_hf);
394 ret = rte_eth_dev_configure(port_tx, 1, 1, &conf);
396 rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret);
/* Descriptor counts may have been clamped for port_rx; reset to the
 * requested defaults before adjusting for port_tx. */
398 nb_rxd = NIC_RX_QUEUE_DESC;
399 nb_txd = NIC_TX_QUEUE_DESC;
400 ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_tx, &nb_rxd, &nb_txd);
402 rte_exit(EXIT_FAILURE, "Port %d adjust number of descriptors error (%d)\n",
405 rxq_conf = dev_info.default_rxconf;
406 rxq_conf.offloads = conf.rxmode.offloads;
407 ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, nb_rxd,
408 rte_eth_dev_socket_id(port_tx),
411 rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);
413 txq_conf = dev_info.default_txconf;
414 txq_conf.offloads = conf.txmode.offloads;
415 ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, nb_txd,
416 rte_eth_dev_socket_id(port_tx),
419 rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_tx, ret);
/* Buffered-TX state, sized for one full TX burst, allocated on the TX
 * port's NUMA socket. */
421 tx_buffer = rte_zmalloc_socket("tx_buffer",
422 RTE_ETH_TX_BUFFER_SIZE(RTE_MBUF_F_TX_BURST_MAX), 0,
423 rte_eth_dev_socket_id(port_tx));
424 if (tx_buffer == NULL)
425 rte_exit(EXIT_FAILURE, "Port %d TX buffer allocation error\n",
428 rte_eth_tx_buffer_init(tx_buffer, RTE_MBUF_F_TX_BURST_MAX);
430 ret = rte_eth_dev_start(port_rx);
432 rte_exit(EXIT_FAILURE, "Port %d start error (%d)\n", port_rx, ret);
434 ret = rte_eth_dev_start(port_tx);
436 rte_exit(EXIT_FAILURE, "Port %d start error (%d)\n", port_tx, ret);
438 ret = rte_eth_promiscuous_enable(port_rx);
440 rte_exit(EXIT_FAILURE,
441 "Port %d promiscuous mode enable error (%s)\n",
442 port_rx, rte_strerror(-ret));
444 ret = rte_eth_promiscuous_enable(port_tx);
446 rte_exit(EXIT_FAILURE,
447 "Port %d promiscuous mode enable error (%s)\n",
/* NOTE(review): copy-paste bug — the failing call above is on port_tx
 * but this message reports port_rx; should be port_tx. */
448 port_rx, rte_strerror(-ret));
450 /* App configuration */
451 ret = app_configure_flow_table();
453 rte_exit(EXIT_FAILURE, "Invalid configure flow table\n");
455 /* Launch per-lcore init on every lcore */
456 rte_eal_mp_remote_launch(main_loop, NULL, CALL_MAIN);
457 RTE_LCORE_FOREACH_WORKER(lcore_id) {
458 if (rte_eal_wait_lcore(lcore_id) < 0)
462 /* clean up the EAL */