/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <getopt.h>

#include <rte_common.h>
#include <rte_eal.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_mbuf.h>
#include <rte_meter.h>
/*
 * Traffic metering configuration
 */
#define APP_MODE_FWD                    0
#define APP_MODE_SRTCM_COLOR_BLIND      1
#define APP_MODE_SRTCM_COLOR_AWARE      2
#define APP_MODE_TRTCM_COLOR_BLIND      3
#define APP_MODE_TRTCM_COLOR_AWARE      4

#define APP_MODE                        APP_MODE_SRTCM_COLOR_BLIND

#include "main.h"
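/* main.h uses APP_MODE to select the metering flavour: FLOW_METER, PROFILE,
 * FUNC_CONFIG and FUNC_METER resolve to either the srTCM or the trTCM
 * rte_meter API, and main.h also provides the policer_action and
 * policer_table definitions used below.
 */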
#define APP_PKT_FLOW_POS                33
#define APP_PKT_COLOR_POS               5

#if APP_PKT_FLOW_POS > 64 || APP_PKT_COLOR_POS > 64
#error Byte offset must not exceed 64
#endif

/*
 * Buffer pool configuration
 */
#define NB_MBUF                         8192
#define MEMPOOL_CACHE_SIZE              256

static struct rte_mempool *pool = NULL;
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.mq_mode = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		.offloads = DEV_RX_OFFLOAD_CHECKSUM,
	},
	.rx_adv_conf = {
		.rss_conf = { .rss_key = NULL, .rss_hf = ETH_RSS_IP },
	},
	.txmode = {
		.mq_mode = ETH_DCB_NONE,
	},
};
#define NIC_RX_QUEUE_DESC               1024
#define NIC_TX_QUEUE_DESC               1024

#define NIC_RX_QUEUE                    0
#define NIC_TX_QUEUE                    0

#define PKT_RX_BURST_MAX                32
#define PKT_TX_BURST_MAX                32
#define TIME_TX_DRAIN                   200000ULL
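/* TIME_TX_DRAIN is expressed in TSC cycles: main_loop() flushes the TX
 * buffer whenever this many cycles elapse without the buffer filling up.
 */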
static uint16_t port_rx;
static uint16_t port_tx;
static struct rte_mbuf *pkts_rx[PKT_RX_BURST_MAX];
struct rte_eth_dev_tx_buffer *tx_buffer;
struct rte_meter_srtcm_params app_srtcm_params = {
	.cir = 1000000 * 46, .cbs = 2048, .ebs = 2048
};
struct rte_meter_srtcm_profile app_srtcm_profile;

struct rte_meter_trtcm_params app_trtcm_params = {
	.cir = 1000000 * 46, .pir = 1500000 * 46, .cbs = 2048, .pbs = 2048
};
struct rte_meter_trtcm_profile app_trtcm_profile;

#define APP_FLOWS_MAX  256
FLOW_METER app_flows[APP_FLOWS_MAX];
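/* One meter context per flow; the flow index is a single packet byte
 * (APP_PKT_FLOW_POS) masked with APP_FLOWS_MAX - 1 in app_pkt_handle().
 */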
static int
app_configure_flow_table(void)
{
	uint32_t i;
	int ret;

	ret = rte_meter_srtcm_profile_config(&app_srtcm_profile,
		&app_srtcm_params);
	if (ret)
		return ret;

	ret = rte_meter_trtcm_profile_config(&app_trtcm_profile,
		&app_trtcm_params);
	if (ret)
		return ret;

	for (i = 0; i < APP_FLOWS_MAX; i++) {
		ret = FUNC_CONFIG(&app_flows[i], &PROFILE);
		if (ret)
			return ret;
	}
	return 0;
}

static inline void
app_set_pkt_color(uint8_t *pkt_data, enum policer_action color)
{
	pkt_data[APP_PKT_COLOR_POS] = (uint8_t)color;
}
static inline int
app_pkt_handle(struct rte_mbuf *pkt, uint64_t time)
{
	uint8_t input_color, output_color;
	uint8_t *pkt_data = rte_pktmbuf_mtod(pkt, uint8_t *);
	uint32_t pkt_len = rte_pktmbuf_pkt_len(pkt) -
		sizeof(struct rte_ether_hdr);
	uint8_t flow_id = (uint8_t)(pkt_data[APP_PKT_FLOW_POS] & (APP_FLOWS_MAX - 1));
	input_color = pkt_data[APP_PKT_COLOR_POS];
	enum policer_action action;

	/* color input is not used for blind modes */
	output_color = (uint8_t) FUNC_METER(&app_flows[flow_id],
		&PROFILE,
		time,
		pkt_len,
		(enum rte_color) input_color);

	/* Apply policing and set the output color */
	action = policer_table[input_color][output_color];
	app_set_pkt_color(pkt_data, action);

	return action;
}
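/* Per-lcore forwarding loop: meter, police and forward packets from port_rx
 * to port_tx, periodically draining the TX buffer.
 */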
static __attribute__((noreturn)) int
main_loop(__attribute__((unused)) void *dummy)
{
	uint64_t current_time, last_time = rte_rdtsc();
	uint32_t lcore_id = rte_lcore_id();

	printf("Core %u: port RX = %d, port TX = %d\n", lcore_id, port_rx, port_tx);

	while (1) {
		uint64_t time_diff;
		int i, nb_rx;
		/* Mechanism to avoid stale packets in the output buffer */
		current_time = rte_rdtsc();
		time_diff = current_time - last_time;
		if (unlikely(time_diff > TIME_TX_DRAIN)) {
			/* Flush tx buffer */
			rte_eth_tx_buffer_flush(port_tx, NIC_TX_QUEUE, tx_buffer);
			last_time = current_time;
		}

		/* Read packet burst from NIC RX */
		nb_rx = rte_eth_rx_burst(port_rx, NIC_RX_QUEUE, pkts_rx, PKT_RX_BURST_MAX);

		/* Handle packets */
		for (i = 0; i < nb_rx; i++) {
			struct rte_mbuf *pkt = pkts_rx[i];

			/* Handle current packet: drop if policed to DROP, else forward */
			if (app_pkt_handle(pkt, current_time) == DROP)
				rte_pktmbuf_free(pkt);
			else
				rte_eth_tx_buffer(port_tx, NIC_TX_QUEUE, tx_buffer, pkt);
		}
	}
}
static void
print_usage(const char *prgname)
{
	printf("%s [EAL options] -- -p PORTMASK\n"
		"  -p PORTMASK: hexadecimal bitmask of ports to configure\n",
		prgname);
}
static int
parse_portmask(const char *portmask)
{
	char *end = NULL;
	unsigned long pm;

	/* parse hexadecimal string */
	pm = strtoul(portmask, &end, 16);
	if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
		return -1;

	return pm;
}
/* Parse the arguments given on the command line of the application */
static int
parse_args(int argc, char **argv)
{
	int opt, option_index;
	char **argvopt = argv;
	char *prgname = argv[0];
	static struct option lgopts[] = {
		{NULL, 0, 0, 0}
	};
	uint64_t port_mask, i, mask;

	while ((opt = getopt_long(argc, argvopt, "p:", lgopts, &option_index)) != EOF) {
		switch (opt) {
		case 'p':
			port_mask = parse_portmask(optarg);
			if (port_mask == 0) {
				printf("invalid port mask (null port mask)\n");
				print_usage(prgname);
				return -1;
			}

			/* lowest set bit selects the RX port */
			for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
				if (mask & port_mask) {
					port_rx = i;
					port_mask &= ~mask;
					break;
				}
			}

			/* next set bit selects the TX port */
			for (i = 0, mask = 1; i < 64; i++, mask <<= 1) {
				if (mask & port_mask) {
					port_tx = i;
					port_mask &= ~mask;
					break;
				}
			}

			if (port_mask != 0) {
				printf("invalid port mask (more than 2 ports)\n");
				print_usage(prgname);
				return -1;
			}
			break;
		default:
			print_usage(prgname);
			return -1;
		}
	}

	if (optind <= 1) {
		print_usage(prgname);
		return -1;
	}

	argv[optind-1] = prgname;
	optind = 1; /* reset getopt lib */

	return 0;
}
int
main(int argc, char **argv)
{
	uint32_t lcore_id;
	uint16_t nb_rxd = NIC_RX_QUEUE_DESC;
	uint16_t nb_txd = NIC_TX_QUEUE_DESC;
	struct rte_eth_conf conf;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_dev_info dev_info;
	int ret;

	/* EAL init */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");
	argc -= ret;
	argv += ret;

	if (rte_lcore_count() != 1) {
		rte_exit(EXIT_FAILURE, "This application does not accept more than one core. "
			"Please adjust the \"-c COREMASK\" parameter accordingly.\n");
	}
	/* Parse application (non-EAL) arguments */
	ret = parse_args(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid input arguments\n");
	/* Buffer pool init */
	pool = rte_pktmbuf_pool_create("pool", NB_MBUF, MEMPOOL_CACHE_SIZE,
		0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
	if (pool == NULL)
		rte_exit(EXIT_FAILURE, "Buffer pool creation error\n");
	/* NIC init: RX port */
	conf = port_conf;
	rte_eth_dev_info_get(port_rx, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Keep only the RSS hash functions the hardware actually supports */
	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
	if (conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support, "
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port_rx,
			port_conf.rx_adv_conf.rss_conf.rss_hf,
			conf.rx_adv_conf.rss_conf.rss_hf);
	}
	ret = rte_eth_dev_configure(port_rx, 1, 1, &conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret);

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_rx, &nb_rxd, &nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d adjust number of descriptors error (%d)\n",
			port_rx, ret);
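	/* nb_rxd/nb_txd now hold descriptor counts the device actually supports */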
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, nb_rxd,
		rte_eth_dev_socket_id(port_rx),
		&rxq_conf, pool);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, nb_txd,
		rte_eth_dev_socket_id(port_rx),
		&txq_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_rx, ret);
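	/* NIC init: TX port gets the same treatment as the RX port */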
	conf = port_conf;
	rte_eth_dev_info_get(port_tx, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
	if (conf.rx_adv_conf.rss_conf.rss_hf !=
			port_conf.rx_adv_conf.rss_conf.rss_hf) {
		printf("Port %u modified RSS hash function based on hardware support, "
			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
			port_tx,
			port_conf.rx_adv_conf.rss_conf.rss_hf,
			conf.rx_adv_conf.rss_conf.rss_hf);
	}
	ret = rte_eth_dev_configure(port_tx, 1, 1, &conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret);

	nb_rxd = NIC_RX_QUEUE_DESC;
	nb_txd = NIC_TX_QUEUE_DESC;
	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_tx, &nb_rxd, &nb_txd);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d adjust number of descriptors error (%d)\n",
			port_tx, ret);
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, nb_rxd,
		rte_eth_dev_socket_id(port_tx),
		&rxq_conf, pool);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);
	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = conf.txmode.offloads;
	ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, nb_txd,
		rte_eth_dev_socket_id(port_tx),
		&txq_conf);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_tx, ret);
	tx_buffer = rte_zmalloc_socket("tx_buffer",
		RTE_ETH_TX_BUFFER_SIZE(PKT_TX_BURST_MAX), 0,
		rte_eth_dev_socket_id(port_tx));
	if (tx_buffer == NULL)
		rte_exit(EXIT_FAILURE, "Port %d TX buffer allocation error\n",
			port_tx);

	rte_eth_tx_buffer_init(tx_buffer, PKT_TX_BURST_MAX);
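	/* rte_eth_tx_buffer() enqueues packets into tx_buffer and transmits them
	 * automatically once PKT_TX_BURST_MAX packets have accumulated; the drain
	 * logic in main_loop() flushes anything left over.
	 */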
	ret = rte_eth_dev_start(port_rx);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d start error (%d)\n", port_rx, ret);

	ret = rte_eth_dev_start(port_tx);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Port %d start error (%d)\n", port_tx, ret);

	rte_eth_promiscuous_enable(port_rx);
	rte_eth_promiscuous_enable(port_tx);
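	/* Both ports are now promiscuous: all traffic is received regardless of
	 * destination MAC address.
	 */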
	/* App configuration */
	ret = app_configure_flow_table();
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Flow table configuration error\n");
	/* Launch per-lcore init on every lcore */
	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			return -1;
	}

	return 0;
}