1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
13 #ifndef RTE_LIBRTE_KNI
18 printf("KNI not supported, skipping test\n");
24 #include <rte_string_fns.h>
25 #include <rte_mempool.h>
26 #include <rte_ethdev.h>
27 #include <rte_bus_pci.h>
28 #include <rte_cycles.h>
/* Maximum frame payload handled by the test; mbuf data room adds headroom. */
32 #define MAX_PACKET_SZ 2048
33 #define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)
/* Burst size used by the tx/rx loops; the mempool cache is sized to match. */
34 #define PKT_BURST_SZ 32
35 #define MEMPOOL_CACHE_SZ PKT_BURST_SZ
39 #define KNI_TIMEOUT_MS 5000 /* ms */
/* Shell command prefix used to drive the kernel-visible KNI interface. */
41 #define IFCONFIG "/sbin/ifconfig "
/* Name of the KNI interface this test creates. */
42 #define TEST_KNI_PORT "test_kni_port"
43 #define KNI_TEST_MAX_PORTS 4
44 /* The threshold number of mbufs to be transmitted or received. */
45 #define KNI_NUM_MBUF_THRESHOLD 100
/* Last MTU value received via the change_mtu callback; 0 = not yet set. */
46 static int kni_pkt_mtu = 0;
/* Counters shared between the ingress and egress lcores (hence volatile). */
48 struct test_kni_stats {
49 volatile uint64_t ingress;
50 volatile uint64_t egress;
/* Static port configuration used for the single test port. */
53 static const struct rte_eth_rxconf rx_conf = {
62 static const struct rte_eth_txconf tx_conf = {
72 static const struct rte_eth_conf port_conf = {
74 .mq_mode = ETH_DCB_NONE,
/* Default KNI ops: all callbacks disabled; handlers are registered later
 * by test_kni_register_handler_mp() to exercise the register/unregister path. */
78 static struct rte_kni_ops kni_ops = {
80 .config_network_if = NULL,
81 .config_mac_address = NULL,
82 .config_promiscusity = NULL,
/* Roles assigned by test_kni_allocate_lcores(). */
85 static unsigned lcore_master, lcore_ingress, lcore_egress;
86 static struct rte_kni *test_kni_ctx;
87 static struct test_kni_stats stats;
/* Set by the master lcore to tell the worker lcores to stop. */
89 static volatile uint32_t test_kni_processing_flag;
/* Return the shared pktmbuf pool, creating "kni_mempool" on first use.
 * Looks up an existing pool first so repeated test runs reuse it. */
91 static struct rte_mempool *
92 test_kni_create_mempool(void)
94 struct rte_mempool * mp;
96 mp = rte_mempool_lookup("kni_mempool")
98 mp = rte_pktmbuf_pool_create("kni_mempool",
100 MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ,
/* Look up the pool created by test_kni_create_mempool(); NULL if absent. */
106 static struct rte_mempool *
107 test_kni_lookup_mempool(void)
109 return rte_mempool_lookup("kni_mempool");
111 /* Callback for request of changing MTU.
 * Records the new MTU in kni_pkt_mtu so the test can verify the kernel
 * request was delivered; logs before and after for traceability. */
113 kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
115 printf("Change MTU of port %d to %u\n", port_id, new_mtu);
116 kni_pkt_mtu = new_mtu;
117 printf("Change MTU of port %d to %i successfully.\n",
118 port_id, kni_pkt_mtu);
122 * This loop fully tests the basic functions of KNI. e.g. transmitting,
123 * receiving to, from kernel space, and kernel requests.
125 * This is the loop to transmit/receive mbufs to/from the kernel interface,
126 * as supported by the KNI kernel module. The ingress lcore will allocate mbufs and
127 * transmit them to kernel space; while the egress lcore will receive the mbufs
128 * from kernel space and free them.
129 * On the master lcore, several commands will be run to check handling the
130 * kernel requests. And it will finally set the flag to exit the KNI
131 * transmitting/receiving to/from the kernel space.
133 * Note: To support this testing, the KNI kernel module needs to be insmodded
134 * in one of its loopback modes.
/* Per-lcore worker: role is selected by comparing rte_lcore_id() against the
 * roles assigned in test_kni_allocate_lcores(). */
137 test_kni_loop(__rte_unused void *arg)
140 unsigned nb_rx, nb_tx, num, i;
141 const unsigned lcore_id = rte_lcore_id();
142 struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
/* Master: exercise kernel request handling by toggling the interface via
 * ifconfig, then raise the flag so the worker lcores exit their loops. */
144 if (lcore_id == lcore_master) {
145 rte_delay_ms(KNI_TIMEOUT_MS);
146 /* tests of handling kernel request */
147 if (system(IFCONFIG TEST_KNI_PORT" up") == -1)
149 if (system(IFCONFIG TEST_KNI_PORT" mtu 1400") == -1)
151 if (system(IFCONFIG TEST_KNI_PORT" down") == -1)
153 rte_delay_ms(KNI_TIMEOUT_MS);
154 test_kni_processing_flag = 1;
/* Ingress: allocate a burst of mbufs and push them toward kernel space. */
155 } else if (lcore_id == lcore_ingress) {
156 struct rte_mempool *mp = test_kni_lookup_mempool();
162 if (test_kni_processing_flag)
165 for (nb_rx = 0; nb_rx < PKT_BURST_SZ; nb_rx++) {
166 pkts_burst[nb_rx] = rte_pktmbuf_alloc(mp);
167 if (!pkts_burst[nb_rx])
171 num = rte_kni_tx_burst(test_kni_ctx, pkts_burst,
173 stats.ingress += num;
174 rte_kni_handle_request(test_kni_ctx);
/* Free any mbufs the tx burst did not accept to avoid leaking the pool. */
176 for (i = num; i < nb_rx; i++) {
177 rte_pktmbuf_free(pkts_burst[i]);
/* Egress: drain mbufs coming back from kernel space and free them. */
182 } else if (lcore_id == lcore_egress) {
184 if (test_kni_processing_flag)
186 num = rte_kni_rx_burst(test_kni_ctx, pkts_burst,
189 for (nb_tx = 0; nb_tx < num; nb_tx++)
190 rte_pktmbuf_free(pkts_burst[nb_tx]);
/* Assign the master lcore plus two worker lcores (ingress/egress) for the
 * test. Returns 0 when exactly two non-master lcores were found, -1 otherwise. */
199 test_kni_allocate_lcores(void)
201 unsigned i, count = 0;
203 lcore_master = rte_get_master_lcore();
204 printf("master lcore: %u\n", lcore_master);
205 for (i = 0; i < RTE_MAX_LCORE; i++) {
208 if (rte_lcore_is_enabled(i) && i != lcore_master) {
216 printf("count: %u\n", count);
/* Need exactly an ingress and an egress lcore besides the master. */
218 return count == 2 ? 0 : -1;
/* Verify handler registration/unregistration works across processes.
 * Forks: the child looks up the KNI device, checks invalid-parameter paths,
 * registers a change_mtu handler, polls until the kernel MTU request is
 * handled, then unregisters and confirms no further requests are served.
 * The parent drives the kernel side by setting the MTU via ifconfig and
 * waits for the child's exit status. */
222 test_kni_register_handler_mp(void)
224 #define TEST_KNI_HANDLE_REQ_COUNT 10 /* 5s */
225 #define TEST_KNI_HANDLE_REQ_INTERVAL 500 /* ms */
226 #define TEST_KNI_MTU 1450
227 #define TEST_KNI_MTU_STR " 1450"
232 printf("Failed to fork a process\n");
/* Child process: exercise the handler registration API. */
234 } else if (pid == 0) {
236 struct rte_kni *kni = rte_kni_get(TEST_KNI_PORT);
237 struct rte_kni_ops ops = {
238 .change_mtu = kni_change_mtu,
239 .config_network_if = NULL,
240 .config_mac_address = NULL,
241 .config_promiscusity = NULL,
245 printf("Failed to get KNI named %s\n", TEST_KNI_PORT);
251 /* Check with the invalid parameters */
/* Fixed typo in error message: "successuflly" -> "successfully". */
252 if (rte_kni_register_handlers(kni, NULL) == 0) {
253 printf("Unexpectedly register successfully "
254 "with NULL ops pointer\n");
257 if (rte_kni_register_handlers(NULL, &ops) == 0) {
258 printf("Unexpectedly register successfully "
259 "to NULL KNI device pointer\n");
263 if (rte_kni_register_handlers(kni, &ops)) {
264 printf("Fail to register ops\n");
268 /* Check registering again after it has been registered */
269 if (rte_kni_register_handlers(kni, &ops) == 0) {
270 printf("Unexpectedly register successfully after "
271 "it has already been registered\n");
276 * Handle the request of setting MTU,
277 * with registered handlers.
/* Poll until kni_change_mtu has recorded the expected MTU or we time out. */
279 for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
280 rte_kni_handle_request(kni);
281 if (kni_pkt_mtu == TEST_KNI_MTU)
283 rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
285 if (i >= TEST_KNI_HANDLE_REQ_COUNT) {
286 printf("MTU has not been set\n");
291 if (rte_kni_unregister_handlers(kni) < 0) {
292 printf("Fail to unregister ops\n");
296 /* Check with invalid parameter */
297 if (rte_kni_unregister_handlers(NULL) == 0) {
302 * Handle the request of setting MTU,
303 * without registered handlers.
/* With handlers removed, the MTU callback must never fire. */
305 for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
306 rte_kni_handle_request(kni);
307 if (kni_pkt_mtu != 0)
309 rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
311 if (kni_pkt_mtu != 0) {
312 printf("MTU shouldn't be set\n");
/* Parent process: trigger the kernel MTU requests the child handles. */
321 if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
326 if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
330 p_ret = wait(&status);
331 if (!WIFEXITED(status)) {
332 printf("Child process (%d) exit abnormally\n", p_ret);
335 if (WEXITSTATUS(status) != 0) {
336 printf("Child process exit with failure\n");
/* Core KNI exercise for one port: allocate a KNI device bound to port_id,
 * run the ingress/egress/master loops across lcores, check traffic counters,
 * then test release, double-release, and memzone-reuse reallocation paths. */
345 test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
350 struct rte_kni_conf conf;
351 struct rte_eth_dev_info info;
352 struct rte_kni_ops ops;
353 const struct rte_pci_device *pci_dev;
354 const struct rte_bus *bus = NULL;
359 memset(&conf, 0, sizeof(conf));
360 memset(&info, 0, sizeof(info));
361 memset(&ops, 0, sizeof(ops));
363 rte_eth_dev_info_get(port_id, &info);
/* KNI needs the PCI address/id of the underlying device, when present. */
365 bus = rte_bus_find_by_device(info.device);
366 if (bus && !strcmp(bus->name, "pci")) {
367 pci_dev = RTE_DEV_TO_PCI(info.device);
368 conf.addr = pci_dev->addr;
369 conf.id = pci_dev->id;
371 snprintf(conf.name, sizeof(conf.name), TEST_KNI_PORT);
373 /* core id 1 configured for kernel thread */
376 conf.mbuf_size = MAX_PACKET_SZ;
377 conf.group_id = port_id;
380 ops.port_id = port_id;
382 /* basic test of kni processing */
383 kni = rte_kni_alloc(mp, &conf, &ops);
385 printf("fail to create kni\n");
390 test_kni_processing_flag = 0;
395 * Check multiple processes support on
396 * registering/unregistering handlers.
398 if (test_kni_register_handler_mp() < 0) {
399 printf("fail to check multiple process support\n");
/* Run test_kni_loop on every lcore and wait for all workers to finish. */
404 rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MASTER);
405 RTE_LCORE_FOREACH_SLAVE(i) {
406 if (rte_eal_wait_lcore(i) < 0) {
412 * Check if the number of mbufs received from kernel space is equal
413 * to that of transmitted to kernel space
415 if (stats.ingress < KNI_NUM_MBUF_THRESHOLD ||
416 stats.egress < KNI_NUM_MBUF_THRESHOLD) {
417 printf("The ingress/egress number should not be "
418 "less than %u\n", (unsigned)KNI_NUM_MBUF_THRESHOLD);
423 if (rte_kni_release(kni) < 0) {
424 printf("fail to release kni\n");
429 /* test of releasing a released kni device */
430 if (rte_kni_release(kni) == 0) {
431 printf("should not release a released kni device\n");
435 /* test of reusing memzone */
436 kni = rte_kni_alloc(mp, &conf, &ops);
438 printf("fail to create kni\n");
442 /* Release the kni for following testing */
443 if (rte_kni_release(kni) < 0) {
444 printf("fail to release kni\n");
450 if (rte_kni_release(kni) < 0) {
451 printf("fail to release kni\n");
/* Entry point of the KNI autotest: set up one ethdev port and a mempool,
 * run the positive-path processing test, then probe the API's negative
 * paths (NULL mempool/conf/context, empty and unknown device names). */
462 uint16_t nb_ports, port_id;
464 struct rte_mempool *mp;
465 struct rte_kni_conf conf;
466 struct rte_eth_dev_info info;
467 struct rte_kni_ops ops;
468 const struct rte_pci_device *pci_dev;
469 const struct rte_bus *bus;
471 /* Initialize KNI subsystem */
472 rte_kni_init(KNI_TEST_MAX_PORTS);
474 if (test_kni_allocate_lcores() < 0) {
475 printf("No enough lcores for kni processing\n");
479 mp = test_kni_create_mempool();
481 printf("fail to create mempool for kni\n");
485 nb_ports = rte_eth_dev_count_avail();
487 printf("no supported nic port found\n");
491 /* configuring port 0 for the test is enough */
493 ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
495 printf("fail to configure port %d\n", port_id);
499 ret = rte_eth_rx_queue_setup(port_id, 0, NB_RXD, SOCKET, &rx_conf, mp);
501 printf("fail to setup rx queue for port %d\n", port_id);
505 ret = rte_eth_tx_queue_setup(port_id, 0, NB_TXD, SOCKET, &tx_conf);
507 printf("fail to setup tx queue for port %d\n", port_id);
511 ret = rte_eth_dev_start(port_id);
513 printf("fail to start port %d\n", port_id);
516 rte_eth_promiscuous_enable(port_id);
518 /* basic test of kni processing */
519 ret = test_kni_processing(port_id, mp);
523 /* test of allocating KNI with NULL mempool pointer */
524 memset(&info, 0, sizeof(info));
525 memset(&conf, 0, sizeof(conf));
526 memset(&ops, 0, sizeof(ops));
527 rte_eth_dev_info_get(port_id, &info);
529 bus = rte_bus_find_by_device(info.device);
532 if (bus && !strcmp(bus->name, "pci")) {
533 pci_dev = RTE_DEV_TO_PCI(info.device);
534 conf.addr = pci_dev->addr;
535 conf.id = pci_dev->id;
537 conf.group_id = port_id;
538 conf.mbuf_size = MAX_PACKET_SZ;
541 ops.port_id = port_id;
542 kni = rte_kni_alloc(NULL, &conf, &ops);
545 printf("unexpectedly creates kni successfully with NULL "
546 "mempool pointer\n");
550 /* test of allocating KNI without configurations */
551 kni = rte_kni_alloc(mp, NULL, NULL);
554 printf("Unexpectedly allocate KNI device successfully "
555 "without configurations\n");
559 /* test of allocating KNI without a name */
560 memset(&conf, 0, sizeof(conf));
561 memset(&info, 0, sizeof(info));
562 memset(&ops, 0, sizeof(ops));
563 rte_eth_dev_info_get(port_id, &info);
565 bus = rte_bus_find_by_device(info.device);
568 if (bus && !strcmp(bus->name, "pci")) {
569 pci_dev = RTE_DEV_TO_PCI(info.device);
570 conf.addr = pci_dev->addr;
571 conf.id = pci_dev->id;
573 conf.group_id = port_id;
574 conf.mbuf_size = MAX_PACKET_SZ;
577 ops.port_id = port_id;
578 kni = rte_kni_alloc(mp, &conf, &ops);
581 printf("Unexpectedly allocate a KNI device successfully "
586 /* test of releasing NULL kni context */
587 ret = rte_kni_release(NULL);
590 printf("unexpectedly release kni successfully\n");
594 /* test of handling request on NULL device pointer */
595 ret = rte_kni_handle_request(NULL);
598 printf("Unexpectedly handle request on NULL device pointer\n");
602 /* test of getting KNI device with pointer to NULL */
603 kni = rte_kni_get(NULL);
606 printf("Unexpectedly get a KNI device with "
607 "NULL name pointer\n");
611 /* test of getting KNI device with a zero-length name string */
612 memset(&conf, 0, sizeof(conf));
613 kni = rte_kni_get(conf.name);
616 printf("Unexpectedly get a KNI device with "
617 "zero length name string\n");
621 /* test of getting KNI device with an invalid string name */
622 memset(&conf, 0, sizeof(conf));
623 snprintf(conf.name, sizeof(conf.name), "testing");
624 kni = rte_kni_get(conf.name);
627 printf("Unexpectedly get a KNI device with "
628 "a never used name string\n");
634 rte_eth_dev_stop(port_id);
/* Register the test with the DPDK autotest framework as "kni_autotest". */
641 REGISTER_TEST_COMMAND(kni_autotest, test_kni);