/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include <getopt.h>

#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>
#define RX_RING_SIZE 1024
#define TX_RING_SIZE 1024

#define NUM_MBUFS 8191
#define MBUF_CACHE_SIZE 250
/* Restored: max packets per rx/tx burst, used by lcore_main(). */
#define BURST_SIZE 32
22 static int hwts_dynfield_offset = -1;
24 static inline rte_mbuf_timestamp_t *
25 hwts_field(struct rte_mbuf *mbuf)
27 return RTE_MBUF_DYNFIELD(mbuf,
28 hwts_dynfield_offset, rte_mbuf_timestamp_t *);
31 typedef uint64_t tsc_t;
32 static int tsc_dynfield_offset = -1;
35 tsc_field(struct rte_mbuf *mbuf)
37 return RTE_MBUF_DYNFIELD(mbuf, tsc_dynfield_offset, tsc_t *);
/* Usage string printed for unknown command-line options. */
static const char usage[] =
	"%s EAL_ARGS -- [-t]\n";

/* Running latency totals, updated by the TX callback calc_latency().
 * Restored struct wrapper and total_pkts field: both are referenced
 * as latency_numbers.* by calc_latency(). */
static struct {
	uint64_t total_cycles;
	uint64_t total_queue_cycles;
	uint64_t total_pkts;
} latency_numbers;

/* Non-zero when the -t flag enables hardware RX timestamping. */
static int hw_timestamping;

/* Fixed-point shift for converting NIC ticks to TSC cycles via
 * ticks_per_cycle_mult (computed once in port_init()). */
#define TICKS_PER_CYCLE_SHIFT 16
static uint64_t ticks_per_cycle_mult;
54 /* Callback added to the RX port and applied to packets. 8< */
56 add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
57 struct rte_mbuf **pkts, uint16_t nb_pkts,
58 uint16_t max_pkts __rte_unused, void *_ __rte_unused)
61 uint64_t now = rte_rdtsc();
63 for (i = 0; i < nb_pkts; i++)
64 *tsc_field(pkts[i]) = now;
67 /* >8 End of callback addition and application. */
69 /* Callback is added to the TX port. 8< */
71 calc_latency(uint16_t port, uint16_t qidx __rte_unused,
72 struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
75 uint64_t queue_ticks = 0;
76 uint64_t now = rte_rdtsc();
81 rte_eth_read_clock(port, &ticks);
83 for (i = 0; i < nb_pkts; i++) {
84 cycles += now - *tsc_field(pkts[i]);
86 queue_ticks += ticks - *hwts_field(pkts[i]);
89 latency_numbers.total_cycles += cycles;
91 latency_numbers.total_queue_cycles += (queue_ticks
92 * ticks_per_cycle_mult) >> TICKS_PER_CYCLE_SHIFT;
94 latency_numbers.total_pkts += nb_pkts;
96 if (latency_numbers.total_pkts > (100 * 1000 * 1000ULL)) {
97 printf("Latency = %"PRIu64" cycles\n",
98 latency_numbers.total_cycles / latency_numbers.total_pkts);
99 if (hw_timestamping) {
100 printf("Latency from HW = %"PRIu64" cycles\n",
101 latency_numbers.total_queue_cycles
102 / latency_numbers.total_pkts);
104 latency_numbers.total_cycles = 0;
105 latency_numbers.total_queue_cycles = 0;
106 latency_numbers.total_pkts = 0;
110 /* >8 End of callback addition. */
113 * Initialises a given port using global settings and with the rx buffers
114 * coming from the mbuf_pool passed as parameter
117 /* Port initialization. 8< */
119 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
121 struct rte_eth_conf port_conf;
122 const uint16_t rx_rings = 1, tx_rings = 1;
123 uint16_t nb_rxd = RX_RING_SIZE;
124 uint16_t nb_txd = TX_RING_SIZE;
127 struct rte_eth_dev_info dev_info;
128 struct rte_eth_rxconf rxconf;
129 struct rte_eth_txconf txconf;
131 if (!rte_eth_dev_is_valid_port(port))
134 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
136 retval = rte_eth_dev_info_get(port, &dev_info);
138 printf("Error during getting device (port %u) info: %s\n",
139 port, strerror(-retval));
144 if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
145 port_conf.txmode.offloads |=
146 RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
148 if (hw_timestamping) {
149 if (!(dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TIMESTAMP)) {
150 printf("\nERROR: Port %u does not support hardware timestamping\n"
154 port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_TIMESTAMP;
155 rte_mbuf_dyn_rx_timestamp_register(&hwts_dynfield_offset, NULL);
156 if (hwts_dynfield_offset < 0) {
157 printf("ERROR: Failed to register timestamp field\n");
162 retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
166 retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
170 rxconf = dev_info.default_rxconf;
172 for (q = 0; q < rx_rings; q++) {
173 retval = rte_eth_rx_queue_setup(port, q, nb_rxd,
174 rte_eth_dev_socket_id(port), &rxconf, mbuf_pool);
179 txconf = dev_info.default_txconf;
180 txconf.offloads = port_conf.txmode.offloads;
181 for (q = 0; q < tx_rings; q++) {
182 retval = rte_eth_tx_queue_setup(port, q, nb_txd,
183 rte_eth_dev_socket_id(port), &txconf);
188 retval = rte_eth_dev_start(port);
192 if (hw_timestamping && ticks_per_cycle_mult == 0) {
193 uint64_t cycles_base = rte_rdtsc();
195 retval = rte_eth_read_clock(port, &ticks_base);
199 uint64_t cycles = rte_rdtsc();
201 rte_eth_read_clock(port, &ticks);
202 uint64_t c_freq = cycles - cycles_base;
203 uint64_t t_freq = ticks - ticks_base;
204 double freq_mult = (double)c_freq / t_freq;
205 printf("TSC Freq ~= %" PRIu64
206 "\nHW Freq ~= %" PRIu64
208 c_freq * 10, t_freq * 10, freq_mult);
209 /* TSC will be faster than internal ticks so freq_mult is > 0
210 * We convert the multiplication to an integer shift & mult
212 ticks_per_cycle_mult = (1 << TICKS_PER_CYCLE_SHIFT) / freq_mult;
215 struct rte_ether_addr addr;
217 retval = rte_eth_macaddr_get(port, &addr);
219 printf("Failed to get MAC address on port %u: %s\n",
220 port, rte_strerror(-retval));
223 printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
224 " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
226 RTE_ETHER_ADDR_BYTES(&addr));
228 retval = rte_eth_promiscuous_enable(port);
232 /* RX and TX callbacks are added to the ports. 8< */
233 rte_eth_add_rx_callback(port, 0, add_timestamps, NULL);
234 rte_eth_add_tx_callback(port, 0, calc_latency, NULL);
235 /* >8 End of RX and TX callbacks. */
239 /* >8 End of port initialization. */
242 * Main thread that does the work, reading from INPUT_PORT
243 * and writing to OUTPUT_PORT
245 static __rte_noreturn void
250 RTE_ETH_FOREACH_DEV(port)
251 if (rte_eth_dev_socket_id(port) > 0 &&
252 rte_eth_dev_socket_id(port) !=
253 (int)rte_socket_id())
254 printf("WARNING, port %u is on remote NUMA node to "
255 "polling thread.\n\tPerformance will "
256 "not be optimal.\n", port);
258 printf("\nCore %u forwarding packets. [Ctrl+C to quit]\n",
261 RTE_ETH_FOREACH_DEV(port) {
262 struct rte_mbuf *bufs[BURST_SIZE];
263 const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
265 if (unlikely(nb_rx == 0))
267 const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
269 if (unlikely(nb_tx < nb_rx)) {
272 for (buf = nb_tx; buf < nb_rx; buf++)
273 rte_pktmbuf_free(bufs[buf]);
279 /* Main function, does initialisation and calls the per-lcore functions */
281 main(int argc, char *argv[])
283 struct rte_mempool *mbuf_pool;
286 struct option lgopts[] = {
289 int opt, option_index;
291 static const struct rte_mbuf_dynfield tsc_dynfield_desc = {
292 .name = "example_bbdev_dynfield_tsc",
293 .size = sizeof(tsc_t),
294 .align = __alignof__(tsc_t),
298 int ret = rte_eal_init(argc, argv);
301 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
305 while ((opt = getopt_long(argc, argv, "t", lgopts, &option_index))
312 printf(usage, argv[0]);
315 optind = 1; /* reset getopt lib */
317 nb_ports = rte_eth_dev_count_avail();
318 if (nb_ports < 2 || (nb_ports & 1))
319 rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
321 mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
322 NUM_MBUFS * nb_ports, MBUF_CACHE_SIZE, 0,
323 RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
324 if (mbuf_pool == NULL)
325 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
327 tsc_dynfield_offset =
328 rte_mbuf_dynfield_register(&tsc_dynfield_desc);
329 if (tsc_dynfield_offset < 0)
330 rte_exit(EXIT_FAILURE, "Cannot register mbuf field\n");
332 /* initialize all ports */
333 RTE_ETH_FOREACH_DEV(portid)
334 if (port_init(portid, mbuf_pool) != 0)
335 rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16"\n",
338 if (rte_lcore_count() > 1)
339 printf("\nWARNING: Too much enabled lcores - "
340 "App uses only 1 lcore\n");
342 /* call lcore_main on main core only */
345 /* clean up the EAL */