/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>
#include <rte_meter.h>

/*
 * Traffic metering configuration
 */
#define APP_MODE_FWD 0
#define APP_MODE_SRTCM_COLOR_BLIND 1
#define APP_MODE_SRTCM_COLOR_AWARE 2
#define APP_MODE_TRTCM_COLOR_BLIND 3
#define APP_MODE_TRTCM_COLOR_AWARE 4

#define APP_MODE APP_MODE_SRTCM_COLOR_BLIND

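/*
 * APP_MODE selects which metering algorithm is compiled in. The
 * FLOW_METER, PROFILE, FUNC_CONFIG and FUNC_METER symbols used below are
 * macros that resolve to the matching srTCM/trTCM type, profile and API
 * calls for the selected mode; they come from main.h, included next.
 */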
#include "main.h"

#define APP_PKT_FLOW_POS 33
#define APP_PKT_COLOR_POS 5

#if APP_PKT_FLOW_POS > 64 || APP_PKT_COLOR_POS > 64
#error Byte offset needs to be less than 64
#endif
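
/*
 * APP_PKT_FLOW_POS is the byte offset (from the start of the frame) used
 * to derive the flow id, and APP_PKT_COLOR_POS is the byte that carries
 * the input color and is overwritten with the policer decision. The guard
 * above keeps both offsets within the first 64 bytes of the packet.
 */
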
/*
 * Buffer pool configuration
 */
#define NB_MBUF 8192
#define MEMPOOL_CACHE_SIZE 256

static struct rte_mempool *pool = NULL;

/*
 * NIC configuration
 */
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_DCB_NONE,
	},
};

#define NIC_RX_QUEUE_DESC 1024
#define NIC_TX_QUEUE_DESC 1024

#define NIC_RX_QUEUE 0
#define NIC_TX_QUEUE 0

#define PKT_RX_BURST_MAX 32
#define PKT_TX_BURST_MAX 32
#define TIME_TX_DRAIN 200000ULL
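
/*
 * TIME_TX_DRAIN above is compared against rte_rdtsc() deltas, so it is
 * expressed in TSC cycles, not seconds. As a rough illustration (assuming
 * a 2 GHz TSC), 200000 cycles is about 100 microseconds between forced
 * flushes of the TX buffer.
 */
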
static uint16_t port_rx;
static uint16_t port_tx;
static struct rte_mbuf *pkts_rx[PKT_RX_BURST_MAX];
struct rte_eth_dev_tx_buffer *tx_buffer;

struct rte_meter_srtcm_params app_srtcm_params = {
	.cir = 1000000 * 46,
	.cbs = 2048,
	.ebs = 2048
};

struct rte_meter_srtcm_profile app_srtcm_profile;

struct rte_meter_trtcm_params app_trtcm_params = {
	.cir = 1000000 * 46,
	.pir = 1500000 * 46,
	.cbs = 2048,
	.pbs = 2048
};

struct rte_meter_trtcm_profile app_trtcm_profile;
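
/*
 * The parameters above are shared by all flows. For srTCM (RFC 2697) the
 * fields are the committed information rate (cir, bytes per second) and
 * the committed/excess burst sizes (cbs/ebs, bytes); trTCM (RFC 2698)
 * adds a peak information rate (pir) and peak burst size (pbs).
 */
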
#define APP_FLOWS_MAX 256

FLOW_METER app_flows[APP_FLOWS_MAX];
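
/*
 * Per-flow metering state. The flow id is taken from a single packet byte
 * (APP_PKT_FLOW_POS), so at most 256 flows are distinguished; each flow
 * keeps its own token-bucket state, while all flows share the same meter
 * profile (app_srtcm_profile or app_trtcm_profile).
 */
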
static int
app_configure_flow_table(void)
{
	uint32_t i;
	int ret;

	ret = rte_meter_srtcm_profile_config(&app_srtcm_profile,
		&app_srtcm_params);
	if (ret)
		return ret;

	ret = rte_meter_trtcm_profile_config(&app_trtcm_profile,
		&app_trtcm_params);
	if (ret)
		return ret;

	for (i = 0; i < APP_FLOWS_MAX; i++) {
		ret = FUNC_CONFIG(&app_flows[i], &PROFILE);
		if (ret)
			return ret;
	}

	return 0;
}

static inline void
app_set_pkt_color(uint8_t *pkt_data, enum policer_action color)
{
	pkt_data[APP_PKT_COLOR_POS] = (uint8_t)color;
}

static inline int
app_pkt_handle(struct rte_mbuf *pkt, uint64_t time)
{
	uint8_t input_color, output_color;
	uint8_t *pkt_data = rte_pktmbuf_mtod(pkt, uint8_t *);
	uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) - sizeof(struct ether_hdr);
	uint8_t flow_id = (uint8_t)(pkt_data[APP_PKT_FLOW_POS] & (APP_FLOWS_MAX - 1));
	enum policer_action action;

	input_color = pkt_data[APP_PKT_COLOR_POS];

	/* color input is not used for blind modes */
	output_color = (uint8_t) FUNC_METER(&app_flows[flow_id],
		&PROFILE,
		time,
		pkt_len,
		(enum rte_meter_color) input_color);

	/* Apply policing and set the output color */
	action = policer_table[input_color][output_color];
	app_set_pkt_color(pkt_data, action);

	return action;
}
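
/*
 * In app_pkt_handle() above, the (input color, output color) pair indexes
 * policer_table, which maps the metering result to GREEN/YELLOW/RED/DROP.
 * The table lives in main.h; in the stock example it is along these lines
 * (rows = input color, columns = output color):
 *
 *	enum policer_action policer_table[e_RTE_METER_COLORS][e_RTE_METER_COLORS] = {
 *		{ GREEN, RED,    RED },
 *		{ DROP,  YELLOW, RED },
 *		{ DROP,  DROP,   RED },
 *	};
 */
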
static __attribute__((noreturn)) int
main_loop(__attribute__((unused)) void *dummy)
{
	uint64_t current_time, last_time = rte_rdtsc();
	uint32_t lcore_id = rte_lcore_id();

	printf("Core %u: port RX = %d, port TX = %d\n", lcore_id, port_rx, port_tx);

	while (1) {
		uint64_t time_diff;
		int i, nb_rx;

		/* Mechanism to avoid stale packets in the output buffer */
		current_time = rte_rdtsc();
		time_diff = current_time - last_time;
		if (unlikely(time_diff > TIME_TX_DRAIN)) {
			/* Flush tx buffer */
			rte_eth_tx_buffer_flush(port_tx, NIC_TX_QUEUE, tx_buffer);
			last_time = current_time;
		}

		/* Read packet burst from NIC RX */
		nb_rx = rte_eth_rx_burst(port_rx, NIC_RX_QUEUE, pkts_rx, PKT_RX_BURST_MAX);

		/* Handle packets */
		for (i = 0; i < nb_rx; i++) {
			struct rte_mbuf *pkt = pkts_rx[i];

			/* Drop or buffer for TX based on the policer decision */
			if (app_pkt_handle(pkt, current_time) == DROP)
				rte_pktmbuf_free(pkt);
			else
				rte_eth_tx_buffer(port_tx, NIC_TX_QUEUE, tx_buffer, pkt);
		}
	}
}

static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n",
		prgname);
}
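
/*
 * Example invocation (illustrative; the binary name and EAL options
 * depend on the build and platform):
 *
 *	./qos_meter -l 1 -n 4 -- -p 0x3
 *
 * The two least significant set bits of PORTMASK select the RX and TX
 * ports, respectively.
 */
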
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return 0; /* report an empty mask so the caller rejects the input */

	return pm;
}

/* Parse the argument given in the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt;
	int option_index;
	char **argvopt = argv;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};
	uint64_t port_mask, i, mask;

	while ((opt = getopt_long(argc, argvopt, "p:", lgopts, &option_index)) != EOF) {
		switch (opt) {
		case 'p':
			port_mask = parse_portmask(optarg);
			if (port_mask == 0) {
				printf("invalid port mask (null port mask)\n");
				print_usage(prgname);
				return -1;
			}

			/* The first set bit selects the RX port */
			for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
				if (mask & port_mask) {
					port_rx = i;
					port_mask &= ~mask;
					break;
				}
			}

			/* The second set bit selects the TX port */
			for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
				if (mask & port_mask) {
					port_tx = i;
					port_mask &= ~mask;
					break;
				}
			}

			if (port_mask != 0) {
				printf("invalid port mask (more than 2 ports)\n");
				print_usage(prgname);
				return -1;
			}
			break;

		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind <= 1) {
		print_usage(prgname);
		return -1;
	}

	argv[optind - 1] = prgname;

	optind = 1; /* reset getopt lib */
	return 0;
}

int
main(int argc, char **argv)
{
	uint32_t lcore_id;
	uint16_t nb_rxd = NIC_RX_QUEUE_DESC;
	uint16_t nb_txd = NIC_TX_QUEUE_DESC;
	struct rte_eth_conf conf;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_dev_info dev_info;
	int ret;

	/* EAL init */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;
	if (rte_lcore_count() != 1) {
		rte_exit(EXIT_FAILURE, "This application does not accept more than one core. "
			"Please adjust the \"-c COREMASK\" parameter accordingly.\n");
	}

	/* Application non-EAL arguments parse */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid input arguments\n");

	/* Buffer pool init */
	pool = rte_pktmbuf_pool_create("pool", NB_MBUF, MEMPOOL_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (pool == NULL)
		rte_exit(EXIT_FAILURE, "Buffer pool creation error\n");

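	/*
	 * NIC init: both ports are set up the same way, with a single RX and
	 * a single TX queue each. The TX fast-free offload is enabled only
	 * when the hardware reports support for it, and the requested RSS
	 * hash functions are masked down to what the device can offload.
	 */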
	conf = port_conf;
	rte_eth_dev_info_get(port_rx, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
	if (conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support, "
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port_rx,
			port_conf.rx_adv_conf.rss_conf.rss_hf,
			conf.rx_adv_conf.rss_conf.rss_hf);
	}

	ret = rte_eth_dev_configure(port_rx, 1, 1, &conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret);

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_rx, &nb_rxd, &nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d adjust number of descriptors error (%d)\n",
			port_rx, ret);

	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, nb_rxd,
		rte_eth_dev_socket_id(port_rx),
		&rxq_conf, pool);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, nb_txd,
		rte_eth_dev_socket_id(port_rx),
		&txq_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_rx, ret);

	/* Same configuration steps for the TX port */
	conf = port_conf;
	rte_eth_dev_info_get(port_tx, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
	if (conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support, "
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port_tx,
			port_conf.rx_adv_conf.rss_conf.rss_hf,
			conf.rx_adv_conf.rss_conf.rss_hf);
	}

	ret = rte_eth_dev_configure(port_tx, 1, 1, &conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret);

	nb_rxd = NIC_RX_QUEUE_DESC;
	nb_txd = NIC_TX_QUEUE_DESC;
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_tx, &nb_rxd, &nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d adjust number of descriptors error (%d)\n",
			port_tx, ret);

	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, nb_rxd,
		rte_eth_dev_socket_id(port_tx),
		&rxq_conf, pool);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, nb_txd,
		rte_eth_dev_socket_id(port_tx),
		&txq_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_tx, ret);

	tx_buffer = rte_zmalloc_socket("tx_buffer",
		RTE_ETH_TX_BUFFER_SIZE(PKT_TX_BURST_MAX), 0,
		rte_eth_dev_socket_id(port_tx));
	if (tx_buffer == NULL)
		rte_exit(EXIT_FAILURE, "Port %d TX buffer allocation error\n",
			port_tx);

	rte_eth_tx_buffer_init(tx_buffer, PKT_TX_BURST_MAX);
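
	/*
	 * rte_eth_tx_buffer() only queues packets into this buffer; a burst
	 * is sent to the NIC once PKT_TX_BURST_MAX packets have accumulated,
	 * and the periodic rte_eth_tx_buffer_flush() in main_loop() pushes
	 * out anything left over, so packets never sit here indefinitely.
	 */
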
	ret = rte_eth_dev_start(port_rx);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d start error (%d)\n", port_rx, ret);

	ret = rte_eth_dev_start(port_tx);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d start error (%d)\n", port_tx, ret);

	rte_eth_promiscuous_enable(port_rx);
	rte_eth_promiscuous_enable(port_tx);

	/* App configuration */
	ret = app_configure_flow_table();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Flow table configuration error\n");

	/* Launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}