1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
13 #include <rte_string_fns.h>
14 #include <rte_mempool.h>
15 #include <rte_ethdev.h>
16 #include <rte_bus_pci.h>
17 #include <rte_cycles.h>
21 #define MAX_PACKET_SZ 2048
22 #define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)
23 #define PKT_BURST_SZ 32
24 #define MEMPOOL_CACHE_SZ PKT_BURST_SZ
28 #define KNI_TIMEOUT_MS 5000 /* ms */
30 #define IFCONFIG "/sbin/ifconfig "
31 #define TEST_KNI_PORT "test_kni_port"
32 #define KNI_TEST_MAX_PORTS 4
33 /* The threshold number of mbufs to be transmitted or received. */
34 #define KNI_NUM_MBUF_THRESHOLD 100
/* Last MTU value delivered via the change-MTU callback; tests poll it to
 * detect that a kernel request was handled. */
35 static int kni_pkt_mtu = 0;
/* Mbuf counters updated by the ingress/egress lcores; volatile because the
 * master lcore polls them across lcores without locking. */
37 struct test_kni_stats {
38 volatile uint64_t ingress;
39 volatile uint64_t egress;
/* Default RX/TX queue and port configuration for the single test port.
 * NOTE(review): the initializer bodies are partially elided in this listing. */
42 static const struct rte_eth_rxconf rx_conf = {
51 static const struct rte_eth_txconf tx_conf = {
61 static const struct rte_eth_conf port_conf = {
70 .mq_mode = ETH_DCB_NONE,
/* Baseline KNI ops table; most callbacks deliberately NULL so the tests can
 * register/unregister real handlers themselves.
 * NOTE(review): the .change_mtu member line is elided here — confirm in full source. */
74 static struct rte_kni_ops kni_ops = {
76 .config_network_if = NULL,
77 .config_mac_address = NULL,
78 .config_promiscusity = NULL,
/* Lcore roles; lcore_master is set in test_kni_allocate_lcores(), the
 * ingress/egress assignments are presumably made there too (elided lines). */
81 static unsigned lcore_master, lcore_ingress, lcore_egress;
82 static struct rte_kni *test_kni_ctx;
83 static struct test_kni_stats stats;
/* Set to 1 by the master lcore to tell the ingress/egress loops to stop. */
85 static volatile uint32_t test_kni_processing_flag;
/*
 * Create the pktmbuf pool used by the KNI tests, reusing an existing pool of
 * the same name if one was already created.
 * NOTE(review): the lookup-result check, remaining pool-create arguments and
 * the return statement are on elided lines — verify against the full source.
 */
87 static struct rte_mempool *
88 test_kni_create_mempool(void)
90 struct rte_mempool * mp;
92 mp = rte_mempool_lookup("kni_mempool");
94 mp = rte_pktmbuf_pool_create("kni_mempool",
96 MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ,
/* Look up the shared "kni_mempool" created earlier; NULL if it does not exist. */
102 static struct rte_mempool *
103 test_kni_lookup_mempool(void)
105 return rte_mempool_lookup("kni_mempool");
107 /* Callback for request of changing MTU */
/* Records the requested MTU in the file-scope kni_pkt_mtu so the forked
 * handler test can observe that the request was serviced.
 * NOTE(review): the return type line and return statement are elided here. */
109 kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
111 printf("Change MTU of port %d to %u\n", port_id, new_mtu);
112 kni_pkt_mtu = new_mtu;
113 printf("Change MTU of port %d to %i successfully.\n",
114 port_id, kni_pkt_mtu);
118 * This loop fully tests the basic functions of KNI. e.g. transmitting,
119 * receiving to, from kernel space, and kernel requests.
121 * This is the loop to transmit/receive mbufs to/from kernel interface with
122 * supported by KNI kernel module. The ingress lcore will allocate mbufs and
123 * transmit them to kernel space; while the egress lcore will receive the mbufs
124 * from kernel space and free them.
125 * On the master lcore, several commands will be run to check handling the
126 * kernel requests. And it will finally set the flag to exit the KNI
127 * transmitting/receiving to/from the kernel space.
129 * Note: To support this testing, the KNI kernel module needs to be insmodded
130 * in one of its loopback modes.
/* NOTE(review): the return type line, loop constructs, and error paths are on
 * elided lines in this listing. Dispatch below is by the caller's lcore id. */
133 test_kni_loop(__rte_unused void *arg)
136 unsigned nb_rx, nb_tx, num, i;
137 const unsigned lcore_id = rte_lcore_id();
138 struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
/* Master lcore: exercise kernel-request handling via ifconfig commands,
 * then raise the stop flag for the worker lcores. */
140 if (lcore_id == lcore_master) {
141 rte_delay_ms(KNI_TIMEOUT_MS);
142 /* tests of handling kernel request */
143 if (system(IFCONFIG TEST_KNI_PORT" up") == -1)
145 if (system(IFCONFIG TEST_KNI_PORT" mtu 1400") == -1)
147 if (system(IFCONFIG TEST_KNI_PORT" down") == -1)
149 rte_delay_ms(KNI_TIMEOUT_MS);
150 test_kni_processing_flag = 1;
/* Ingress lcore: allocate mbuf bursts and push them into kernel space. */
151 } else if (lcore_id == lcore_ingress) {
152 struct rte_mempool *mp = test_kni_lookup_mempool();
158 if (test_kni_processing_flag)
161 for (nb_rx = 0; nb_rx < PKT_BURST_SZ; nb_rx++) {
162 pkts_burst[nb_rx] = rte_pktmbuf_alloc(mp);
163 if (!pkts_burst[nb_rx])
167 num = rte_kni_tx_burst(test_kni_ctx, pkts_burst,
169 stats.ingress += num;
170 rte_kni_handle_request(test_kni_ctx);
/* Free the mbufs the KNI device did not accept (indices num..nb_rx-1). */
172 for (i = num; i < nb_rx; i++) {
173 rte_pktmbuf_free(pkts_burst[i]);
/* Egress lcore: drain mbufs looped back from kernel space and free them. */
178 } else if (lcore_id == lcore_egress) {
180 if (test_kni_processing_flag)
182 num = rte_kni_rx_burst(test_kni_ctx, pkts_burst,
185 for (nb_tx = 0; nb_tx < num; nb_tx++)
186 rte_pktmbuf_free(pkts_burst[nb_tx]);
/*
 * Assign lcore roles for the test: the EAL master lcore plus exactly two
 * worker lcores (ingress and egress).
 * Returns 0 when two workers were found, -1 otherwise.
 * NOTE(review): the return type line and the worker-assignment statements
 * inside the loop are elided in this listing.
 */
195 test_kni_allocate_lcores(void)
197 unsigned i, count = 0;
199 lcore_master = rte_get_master_lcore();
200 printf("master lcore: %u\n", lcore_master);
201 for (i = 0; i < RTE_MAX_LCORE; i++) {
204 if (rte_lcore_is_enabled(i) && i != lcore_master) {
212 printf("count: %u\n", count);
214 return count == 2 ? 0 : -1;
/*
 * Verify handler registration/unregistration works across processes: fork a
 * child that registers a change-MTU handler on the shared KNI device while
 * the parent triggers MTU-change requests via ifconfig.
 * Returns 0 on success, negative on failure.
 * NOTE(review): the return type line, fork() call, child exit paths and
 * several error returns are on elided lines in this listing.
 */
218 test_kni_register_handler_mp(void)
220 #define TEST_KNI_HANDLE_REQ_COUNT 10 /* 5s */
221 #define TEST_KNI_HANDLE_REQ_INTERVAL 500 /* ms */
222 #define TEST_KNI_MTU 1450
223 #define TEST_KNI_MTU_STR " 1450"
228 printf("Failed to fork a process\n");
/* Child process: look up the KNI device and exercise the handler API. */
230 } else if (pid == 0) {
232 struct rte_kni *kni = rte_kni_get(TEST_KNI_PORT);
233 struct rte_kni_ops ops = {
234 .change_mtu = kni_change_mtu,
235 .config_network_if = NULL,
236 .config_mac_address = NULL,
237 .config_promiscusity = NULL,
241 printf("Failed to get KNI named %s\n", TEST_KNI_PORT);
247 /* Check with the invalid parameters */
248 if (rte_kni_register_handlers(kni, NULL) == 0) {
249 printf("Unexpectedly register successuflly "
250 "with NULL ops pointer\n");
253 if (rte_kni_register_handlers(NULL, &ops) == 0) {
254 printf("Unexpectedly register successfully "
255 "to NULL KNI device pointer\n");
/* Valid registration must succeed exactly once. */
259 if (rte_kni_register_handlers(kni, &ops)) {
260 printf("Fail to register ops\n");
264 /* Check registering again after it has been registered */
265 if (rte_kni_register_handlers(kni, &ops) == 0) {
266 printf("Unexpectedly register successfully after "
267 "it has already been registered\n");
272 * Handle the request of setting MTU,
273 * with registered handlers.
/* Poll until the parent's ifconfig-driven MTU request is observed via
 * kni_pkt_mtu, up to TEST_KNI_HANDLE_REQ_COUNT attempts. */
275 for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
276 rte_kni_handle_request(kni);
277 if (kni_pkt_mtu == TEST_KNI_MTU)
279 rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
281 if (i >= TEST_KNI_HANDLE_REQ_COUNT) {
282 printf("MTU has not been set\n");
287 if (rte_kni_unregister_handlers(kni) < 0) {
288 printf("Fail to unregister ops\n");
292 /* Check with invalid parameter */
293 if (rte_kni_unregister_handlers(NULL) == 0) {
298 * Handle the request of setting MTU,
299 * without registered handlers.
/* With handlers unregistered, the MTU request must NOT be serviced, so
 * kni_pkt_mtu stays 0 (it was presumably reset on an elided line — verify). */
301 for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
302 rte_kni_handle_request(kni);
303 if (kni_pkt_mtu != 0)
305 rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
307 if (kni_pkt_mtu != 0) {
308 printf("MTU shouldn't be set\n");
/* Parent process: trigger the MTU-change kernel requests, then reap the
 * child and propagate its exit status. */
317 if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
322 if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
326 p_ret = wait(&status);
327 if (!WIFEXITED(status)) {
328 printf("Child process (%d) exit abnormally\n", p_ret);
331 if (WEXITSTATUS(status) != 0) {
332 printf("Child process exit with failure\n");
/*
 * Core KNI functional test: allocate a KNI device bound to port_id, run the
 * ingress/egress/master loops on all lcores, check the mbuf counters, then
 * exercise release semantics (double release, memzone reuse).
 * Returns 0 on success, negative on failure.
 * NOTE(review): the return type line, local declarations, error-path gotos
 * and several conditionals are on elided lines in this listing.
 */
341 test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
346 struct rte_kni_conf conf;
347 struct rte_eth_dev_info info;
348 struct rte_kni_ops ops;
353 memset(&conf, 0, sizeof(conf));
354 memset(&info, 0, sizeof(info));
355 memset(&ops, 0, sizeof(ops));
/* Build the KNI config from the port's PCI identity. */
357 rte_eth_dev_info_get(port_id, &info);
358 conf.addr = info.pci_dev->addr;
359 conf.id = info.pci_dev->id;
360 snprintf(conf.name, sizeof(conf.name), TEST_KNI_PORT);
362 /* core id 1 configured for kernel thread */
365 conf.mbuf_size = MAX_PACKET_SZ;
366 conf.group_id = port_id;
369 ops.port_id = port_id;
371 /* basic test of kni processing */
372 kni = rte_kni_alloc(mp, &conf, &ops);
374 printf("fail to create kni\n");
379 test_kni_processing_flag = 0;
384 * Check multiple processes support on
385 * registerring/unregisterring handlers.
387 if (test_kni_register_handler_mp() < 0) {
388 printf("fail to check multiple process support\n");
/* Run test_kni_loop on every lcore (master included) and wait for workers. */
393 rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MASTER);
394 RTE_LCORE_FOREACH_SLAVE(i) {
395 if (rte_eal_wait_lcore(i) < 0) {
401 * Check if the number of mbufs received from kernel space is equal
402 * to that of transmitted to kernel space
404 if (stats.ingress < KNI_NUM_MBUF_THRESHOLD ||
405 stats.egress < KNI_NUM_MBUF_THRESHOLD) {
406 printf("The ingress/egress number should not be "
407 "less than %u\n", (unsigned)KNI_NUM_MBUF_THRESHOLD);
412 if (rte_kni_release(kni) < 0) {
413 printf("fail to release kni\n");
418 /* test of releasing a released kni device */
419 if (rte_kni_release(kni) == 0) {
420 printf("should not release a released kni device\n");
424 /* test of reusing memzone */
425 kni = rte_kni_alloc(mp, &conf, &ops);
427 printf("fail to create kni\n");
431 /* Release the kni for following testing */
432 if (rte_kni_release(kni) < 0) {
433 printf("fail to release kni\n");
/* NOTE(review): this second release block appears to be an error-path
 * cleanup (elided label above) — confirm against the full source. */
439 if (rte_kni_release(kni) < 0) {
440 printf("fail to release kni\n");
/*
 * NOTE(review): the test_kni() function header is on an elided line above;
 * the lines below are its body. It is the autotest entry point: it brings up
 * one ethdev port, runs test_kni_processing(), then probes the KNI API's
 * invalid-parameter handling. Error-path gotos and several conditionals are
 * elided in this listing.
 */
451 uint16_t nb_ports, port_id;
453 struct rte_mempool *mp;
454 struct rte_kni_conf conf;
455 struct rte_eth_dev_info info;
456 struct rte_kni_ops ops;
458 /* Initialize KNI subsytem */
459 rte_kni_init(KNI_TEST_MAX_PORTS);
461 if (test_kni_allocate_lcores() < 0) {
462 printf("No enough lcores for kni processing\n");
466 mp = test_kni_create_mempool();
468 printf("fail to create mempool for kni\n");
472 nb_ports = rte_eth_dev_count();
474 printf("no supported nic port found\n");
478 /* configuring port 0 for the test is enough */
480 ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
482 printf("fail to configure port %d\n", port_id);
486 ret = rte_eth_rx_queue_setup(port_id, 0, NB_RXD, SOCKET, &rx_conf, mp);
488 printf("fail to setup rx queue for port %d\n", port_id);
492 ret = rte_eth_tx_queue_setup(port_id, 0, NB_TXD, SOCKET, &tx_conf);
494 printf("fail to setup tx queue for port %d\n", port_id);
498 ret = rte_eth_dev_start(port_id);
500 printf("fail to start port %d\n", port_id);
503 rte_eth_promiscuous_enable(port_id);
505 /* basic test of kni processing */
506 ret = test_kni_processing(port_id, mp);
510 /* test of allocating KNI with NULL mempool pointer */
511 memset(&info, 0, sizeof(info));
512 memset(&conf, 0, sizeof(conf));
513 memset(&ops, 0, sizeof(ops));
514 rte_eth_dev_info_get(port_id, &info);
515 conf.addr = info.pci_dev->addr;
516 conf.id = info.pci_dev->id;
517 conf.group_id = port_id;
518 conf.mbuf_size = MAX_PACKET_SZ;
521 ops.port_id = port_id;
522 kni = rte_kni_alloc(NULL, &conf, &ops);
525 printf("unexpectedly creates kni successfully with NULL "
526 "mempool pointer\n");
530 /* test of allocating KNI without configurations */
531 kni = rte_kni_alloc(mp, NULL, NULL);
534 printf("Unexpectedly allocate KNI device successfully "
535 "without configurations\n");
539 /* test of allocating KNI without a name */
540 memset(&conf, 0, sizeof(conf));
541 memset(&info, 0, sizeof(info));
542 memset(&ops, 0, sizeof(ops));
543 rte_eth_dev_info_get(port_id, &info);
544 conf.addr = info.pci_dev->addr;
545 conf.id = info.pci_dev->id;
546 conf.group_id = port_id;
547 conf.mbuf_size = MAX_PACKET_SZ;
550 ops.port_id = port_id;
551 kni = rte_kni_alloc(mp, &conf, &ops);
554 printf("Unexpectedly allocate a KNI device successfully "
559 /* test of releasing NULL kni context */
560 ret = rte_kni_release(NULL);
563 printf("unexpectedly release kni successfully\n");
567 /* test of handling request on NULL device pointer */
568 ret = rte_kni_handle_request(NULL);
571 printf("Unexpectedly handle request on NULL device pointer\n");
575 /* test of getting KNI device with pointer to NULL */
576 kni = rte_kni_get(NULL);
579 printf("Unexpectedly get a KNI device with "
580 "NULL name pointer\n");
584 /* test of getting KNI device with an zero length name string */
585 memset(&conf, 0, sizeof(conf));
586 kni = rte_kni_get(conf.name);
589 printf("Unexpectedly get a KNI device with "
590 "zero length name string\n");
594 /* test of getting KNI device with an invalid string name */
595 memset(&conf, 0, sizeof(conf));
596 snprintf(conf.name, sizeof(conf.name), "testing");
597 kni = rte_kni_get(conf.name);
600 printf("Unexpectedly get a KNI device with "
601 "a never used name string\n");
/* Cleanup: stop the port before returning (close/return lines elided). */
607 rte_eth_dev_stop(port_id);
/* Register this file's entry point as the "kni_autotest" test command. */
612 REGISTER_TEST_COMMAND(kni_autotest, test_kni);