4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <sys/queue.h>
47 #include <rte_common.h>
49 #include <rte_memory.h>
50 #include <rte_memcpy.h>
51 #include <rte_memzone.h>
52 #include <rte_tailq.h>
54 #include <rte_per_lcore.h>
55 #include <rte_launch.h>
56 #include <rte_atomic.h>
57 #include <rte_cycles.h>
58 #include <rte_prefetch.h>
59 #include <rte_lcore.h>
60 #include <rte_per_lcore.h>
61 #include <rte_branch_prediction.h>
62 #include <rte_interrupts.h>
64 #include <rte_random.h>
65 #include <rte_debug.h>
66 #include <rte_ether.h>
67 #include <rte_ethdev.h>
70 #include <rte_mempool.h>
72 #include <rte_memcpy.h>
76 /* basic constants used in application */
/* Total number of RX queues the app services: with VMDQ+DCB this is
 * pools * traffic-classes (e.g. 16 pools * 8 TCs = 128). */
80 #define NUM_QUEUES 128
/* Mbuf pool sizing: 64K buffers with a 64-entry per-lcore cache; each
 * buffer holds a 2048-byte data room plus the mbuf header and headroom. */
82 #define NUM_MBUFS 64*1024
83 #define MBUF_CACHE_SIZE 64
84 #define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
86 /* Basic application settings */
/* NOTE(review): the embedded line numbers jump here, so some definitions
 * (e.g. RX_PORT/TX_PORT used below) are elided from this excerpt. */
87 #define NUM_POOLS ETH_16_POOLS /* can be ETH_16_POOLS or ETH_32_POOLS */
93 * RX and TX Prefetch, Host, and Write-back threshold values should be
94 * carefully set for optimal performance. Consult the network
95 * controller's datasheet and supporting DPDK documentation for guidance
96 * on how these parameters should be set.
98 /* Default configuration for rx and tx thresholds etc. */
/* NOTE(review): the rx_conf_default initializer body is elided in this
 * excerpt (embedded numbers jump from 99 to 108) — confirm against the
 * full source before editing. */
99 static const struct rte_eth_rxconf rx_conf_default = {
108 * These default values are optimized for use with the Intel(R) 82599 10 GbE
109 * Controller and the DPDK ixgbe PMD. Consider using other values for other
110 * network controllers and/or network drivers.
/* Zero thresholds ask the PMD to substitute its own defaults at
 * queue-setup time. */
112 static const struct rte_eth_txconf tx_conf_default = {
118 .tx_free_thresh = 0, /* Use PMD default values */
119 .tx_rs_thresh = 0, /* Use PMD default values */
122 /* empty vmdq+dcb configuration structure. Filled in programatically */
/* Compile-time template: get_eth_conf() copies this whole struct and then
 * overwrites the rx_adv_conf.vmdq_dcb_conf member at run time.
 * NOTE(review): several initializer lines are elided in this excerpt
 * (embedded numbering is non-contiguous). */
123 static const struct rte_eth_conf vmdq_dcb_conf_default = {
125 .mq_mode = ETH_VMDQ_DCB,
127 .header_split = 0, /**< Header Split disabled */
128 .hw_ip_checksum = 0, /**< IP checksum offload disabled */
129 .hw_vlan_filter = 0, /**< VLAN filtering disabled */
130 .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
/* TX side does not use DCB in this example. */
133 .mq_mode = ETH_DCB_NONE,
137 * should be overridden separately in code with
/* Placeholder values; get_eth_conf() replaces this sub-struct entirely. */
141 .nb_queue_pools = NUM_POOLS,
142 .enable_default_pool = 0,
145 .pool_map = {{0, 0},},
151 /* array used for printing out statistics */
/* One RX-packet counter per queue: incremented in lcore_main(), read and
 * printed by the SIGHUP handler. Declared volatile so the asynchronous
 * signal handler observes the workers' updates. */
152 volatile unsigned long rxPackets[ NUM_QUEUES ] = {0};
/* 32 VLAN tags (0..31): get_eth_conf() maps each tag onto one of the
 * VMDQ pools, round-robin. NOTE(review): the initializer's closing
 * brace is not visible in this excerpt. */
154 const uint16_t vlan_tags[] = {
155 0, 1, 2, 3, 4, 5, 6, 7,
156 8, 9, 10, 11, 12, 13, 14, 15,
157 16, 17, 18, 19, 20, 21, 22, 23,
158 24, 25, 26, 27, 28, 29, 30, 31
161 /* Builds up the correct configuration for vmdq+dcb based on the vlan tags array
162 * given above, and the number of traffic classes available for use. */
164 get_eth_conf(struct rte_eth_conf *eth_conf, enum rte_eth_nb_pools num_pools)
166 struct rte_eth_vmdq_dcb_conf conf;
169 if (num_pools != ETH_16_POOLS && num_pools != ETH_32_POOLS ) return -1;
171 conf.nb_queue_pools = num_pools;
172 conf.enable_default_pool = 0;
173 conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
174 for (i = 0; i < conf.nb_pool_maps; i++){
175 conf.pool_map[i].vlan_id = vlan_tags[ i ];
176 conf.pool_map[i].pools = 1 << (i % num_pools);
178 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
179 conf.dcb_queue[i] = (uint8_t)(i % (NUM_QUEUES/num_pools));
181 rte_memcpy(eth_conf, &vmdq_dcb_conf_default, sizeof(*eth_conf));
182 rte_memcpy(ð_conf->rx_adv_conf.vmdq_dcb_conf, &conf,
183 sizeof(eth_conf->rx_adv_conf.vmdq_dcb_conf));
188 * Initialises a given port using global settings and with the rx buffers
189 * coming from the mbuf_pool passed as parameter
192 port_init(uint8_t port, struct rte_mempool *mbuf_pool)
194 struct rte_eth_conf port_conf;
195 const uint16_t rxRings = ETH_VMDQ_DCB_NUM_QUEUES,
196 txRings = (uint16_t)rte_lcore_count();
197 const uint16_t rxRingSize = 128, txRingSize = 512;
201 get_eth_conf(&port_conf, NUM_POOLS);
203 if (port >= rte_eth_dev_count()) return -1;
205 retval = rte_eth_dev_configure(port, rxRings, txRings, &port_conf);
209 for (q = 0; q < rxRings; q ++) {
210 retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
211 SOCKET0, &rx_conf_default,
217 for (q = 0; q < txRings; q ++) {
218 retval = rte_eth_tx_queue_setup(port, q, txRingSize,
219 SOCKET0, &tx_conf_default);
224 retval = rte_eth_dev_start(port);
231 #ifndef RTE_EXEC_ENV_BAREMETAL
232 /* When we receive a HUP signal, print out our stats */
234 sighup_handler(int signum)
237 for (q = 0; q < NUM_QUEUES; q ++) {
238 if (q % (NUM_QUEUES/NUM_POOLS) == 0)
239 printf("\nPool %u: ", q/(NUM_QUEUES/NUM_POOLS));
240 printf("%lu ", rxPackets[ q ]);
242 printf("\nFinished handling signal %d\n", signum);
247 * Main thread that does the work, reading from INPUT_PORT
248 * and writing to OUTPUT_PORT
250 static __attribute__((noreturn)) int
251 lcore_main(void *arg)
253 const uintptr_t core_num = (uintptr_t)arg;
254 const unsigned num_cores = rte_lcore_count();
255 uint16_t startQueue = (uint16_t)(core_num * (NUM_QUEUES/num_cores));
256 uint16_t endQueue = (uint16_t)(startQueue + (NUM_QUEUES/num_cores));
259 printf("Core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_num,
260 rte_lcore_id(), startQueue, endQueue - 1);
263 struct rte_mbuf *buf[32];
264 const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);
266 for (q = startQueue; q < endQueue; q++) {
267 const uint16_t rxCount = rte_eth_rx_burst(RX_PORT,
271 rxPackets[q] += rxCount;
273 const uint16_t txCount = rte_eth_tx_burst(TX_PORT,
274 (uint16_t)core_num, buf, rxCount);
275 if (txCount != rxCount) {
276 for (i = txCount; i < rxCount; i++)
277 rte_pktmbuf_free(buf[i]);
283 /* Main function, does initialisation and calls the per-lcore functions */
285 MAIN(int argc, char *argv[])
288 struct rte_mempool *mbuf_pool;
292 #ifndef RTE_EXEC_ENV_BAREMETAL
293 signal(SIGHUP, sighup_handler);
296 if (rte_eal_init(argc, argv) < 0)
297 rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
298 if (rte_igb_pmd_init() != 0 ||
299 rte_ixgbe_pmd_init() != 0 ||
300 rte_eal_pci_probe() != 0)
301 rte_exit(EXIT_FAILURE, "Error with NIC driver initialization\n");
303 cores = rte_lcore_count();
304 if ((cores & (cores - 1)) != 0 || cores > 16) {
305 rte_exit(EXIT_FAILURE,
306 "This program can only run on 2,4,8 or 16 cores\n\n");
309 mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS,
310 MBUF_SIZE, MBUF_CACHE_SIZE,
311 sizeof(struct rte_pktmbuf_pool_private),
312 rte_pktmbuf_pool_init, NULL,
313 rte_pktmbuf_init, NULL,
315 if (mbuf_pool == NULL)
316 rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
318 if (port_init(RX_PORT, mbuf_pool) != 0 ||
319 port_init(TX_PORT, mbuf_pool) != 0)
320 rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
322 /* call lcore_main() on every slave lcore */
324 RTE_LCORE_FOREACH_SLAVE(lcore_id) {
325 rte_eal_remote_launch(lcore_main, (void*)i++, lcore_id);
327 /* call on master too */
328 (void) lcore_main((void*)i);