1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
13 #ifndef RTE_LIBRTE_KNI
18 printf("KNI not supported, skipping test\n");
24 #include <rte_string_fns.h>
25 #include <rte_mempool.h>
26 #include <rte_ethdev.h>
27 #include <rte_bus_pci.h>
28 #include <rte_cycles.h>
32 #define MAX_PACKET_SZ 2048
33 #define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)
34 #define PKT_BURST_SZ 32
35 #define MEMPOOL_CACHE_SZ PKT_BURST_SZ
39 #define KNI_TIMEOUT_MS 5000 /* ms */
41 #define IFCONFIG "/sbin/ifconfig "
42 #define TEST_KNI_PORT "test_kni_port"
43 #define KNI_TEST_MAX_PORTS 4
44 /* The threshold number of mbufs to be transmitted or received. */
45 #define KNI_NUM_MBUF_THRESHOLD 100
/* Last MTU value delivered by a kernel MTU-change request (via
 * kni_change_mtu below); 0 means "no request handled yet". */
46 static int kni_pkt_mtu = 0;
/* Mbuf counters shared between lcores: ingress is incremented on the
 * ingress lcore and read on the master lcore after the workers join,
 * hence volatile. */
48 struct test_kni_stats {
49 volatile uint64_t ingress;
50 volatile uint64_t egress;
/* NOTE(review): listing is elided here -- the struct's closing brace and
 * the bodies of the rx/tx/port configuration initializers below are not
 * visible in this excerpt. */
53 static const struct rte_eth_rxconf rx_conf = {
62 static const struct rte_eth_txconf tx_conf = {
72 static const struct rte_eth_conf port_conf = {
74 .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
77 .mq_mode = ETH_DCB_NONE,
/* Default KNI callbacks: everything visible is explicitly NULL.
 * NOTE(review): the elided line 82 presumably sets .change_mtu --
 * confirm against the full file. */
81 static struct rte_kni_ops kni_ops = {
83 .config_network_if = NULL,
84 .config_mac_address = NULL,
85 .config_promiscusity = NULL,
/* Lcore roles, resolved at runtime by test_kni_allocate_lcores(). */
88 static unsigned lcore_master, lcore_ingress, lcore_egress;
89 static struct rte_kni *test_kni_ctx;
90 static struct test_kni_stats stats;
/* Set to 1 by the master lcore to tell the ingress/egress loops to stop. */
92 static volatile uint32_t test_kni_processing_flag;
/* Return the shared "kni_mempool" packet pool, creating it on first use.
 * NOTE(review): the lookup-failure check, the remaining
 * rte_pktmbuf_pool_create() arguments and the return statement are elided
 * from this listing. */
94 static struct rte_mempool *
95 test_kni_create_mempool(void)
97 struct rte_mempool * mp;
99 mp = rte_mempool_lookup("kni_mempool");
101 mp = rte_pktmbuf_pool_create("kni_mempool",
103 MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ,
/* Look up the shared "kni_mempool" by name; returns NULL if it has not
 * been created yet. */
109 static struct rte_mempool *
110 test_kni_lookup_mempool(void)
112 return rte_mempool_lookup("kni_mempool");
114 /* Callback invoked when the kernel requests an MTU change; it records the
 * requested value in kni_pkt_mtu so the tests can verify the request was
 * delivered.  NOTE(review): the return-type line and the return statement
 * are elided from this listing. */
116 kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
118 printf("Change MTU of port %d to %u\n", port_id, new_mtu);
119 kni_pkt_mtu = new_mtu;
120 printf("Change MTU of port %d to %i successfully.\n",
121 port_id, kni_pkt_mtu);
125 * This loop fully tests the basic functions of KNI. e.g. transmitting,
126 * receiving to, from kernel space, and kernel requests.
128 * This is the loop to transmit/receive mbufs to/from the kernel interface,
129 * with support from the KNI kernel module. The ingress lcore will allocate mbufs
130 * transmit them to kernel space; while the egress lcore will receive the mbufs
131 * from kernel space and free them.
132 * On the master lcore, several commands will be run to check handling the
133 * kernel requests. And it will finally set the flag to exit the KNI
134 * transmitting/receiving to/from the kernel space.
136 * Note: To support this testing, the KNI kernel module needs to be insmodded
137 * in one of its loopback modes.
/* Per-lcore worker for the basic KNI test.  The master lcore exercises
 * kernel requests via ifconfig and then raises test_kni_processing_flag;
 * the ingress lcore allocates mbuf bursts and pushes them into the KNI
 * device; the egress lcore drains mbufs back from the KNI device and
 * frees them.  NOTE(review): return type, loop constructs and several
 * branch bodies are elided from this listing. */
140 test_kni_loop(__rte_unused void *arg)
143 unsigned nb_rx, nb_tx, num, i;
144 const unsigned lcore_id = rte_lcore_id();
145 struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
/* Master role: drive kernel-side interface state changes. */
147 if (lcore_id == lcore_master) {
148 rte_delay_ms(KNI_TIMEOUT_MS);
149 /* tests of handling kernel requests */
150 if (system(IFCONFIG TEST_KNI_PORT" up") == -1)
152 if (system(IFCONFIG TEST_KNI_PORT" mtu 1400") == -1)
154 if (system(IFCONFIG TEST_KNI_PORT" down") == -1)
156 rte_delay_ms(KNI_TIMEOUT_MS);
/* Signal the ingress/egress loops to exit. */
157 test_kni_processing_flag = 1;
/* Ingress role: allocate bursts and transmit them into kernel space. */
158 } else if (lcore_id == lcore_ingress) {
159 struct rte_mempool *mp = test_kni_lookup_mempool();
165 if (test_kni_processing_flag)
168 for (nb_rx = 0; nb_rx < PKT_BURST_SZ; nb_rx++) {
169 pkts_burst[nb_rx] = rte_pktmbuf_alloc(mp);
170 if (!pkts_burst[nb_rx])
174 num = rte_kni_tx_burst(test_kni_ctx, pkts_burst,
176 stats.ingress += num;
177 rte_kni_handle_request(test_kni_ctx);
/* Free the mbufs that rte_kni_tx_burst() did not consume. */
179 for (i = num; i < nb_rx; i++) {
180 rte_pktmbuf_free(pkts_burst[i]);
/* Egress role: drain mbufs coming back from kernel space and free them. */
185 } else if (lcore_id == lcore_egress) {
187 if (test_kni_processing_flag)
189 num = rte_kni_rx_burst(test_kni_ctx, pkts_burst,
192 for (nb_tx = 0; nb_tx < num; nb_tx++)
193 rte_pktmbuf_free(pkts_burst[nb_tx]);
/* Assign lcore roles: the EAL master lcore plus exactly two enabled
 * non-master lcores (ingress and egress).  Returns 0 on success, -1 if
 * fewer than two worker lcores are available.  NOTE(review): the loop
 * body that records lcore_ingress/lcore_egress is elided from this
 * listing. */
202 test_kni_allocate_lcores(void)
204 unsigned i, count = 0;
206 lcore_master = rte_get_master_lcore();
207 printf("master lcore: %u\n", lcore_master);
208 for (i = 0; i < RTE_MAX_LCORE; i++) {
211 if (rte_lcore_is_enabled(i) && i != lcore_master) {
219 printf("count: %u\n", count);
/* Exactly two worker lcores are required for ingress + egress. */
221 return count == 2 ? 0 : -1;
/* Multi-process test of rte_kni_register_handlers()/unregister_handlers():
 * the forked child looks up the KNI device, checks invalid-parameter and
 * double-registration cases, then verifies an MTU request is handled only
 * while handlers are registered; the parent triggers the MTU requests via
 * ifconfig and reaps the child.  NOTE(review): fork(), several brace/return
 * lines and the parent/child control flow between the visible lines are
 * elided from this listing. */
225 test_kni_register_handler_mp(void)
227 #define TEST_KNI_HANDLE_REQ_COUNT 10 /* 5s */
228 #define TEST_KNI_HANDLE_REQ_INTERVAL 500 /* ms */
229 #define TEST_KNI_MTU 1450
230 #define TEST_KNI_MTU_STR " 1450"
235 printf("Failed to fork a process\n");
/* Child process: exercise handler registration against the live device. */
237 } else if (pid == 0) {
239 struct rte_kni *kni = rte_kni_get(TEST_KNI_PORT);
240 struct rte_kni_ops ops = {
241 .change_mtu = kni_change_mtu,
242 .config_network_if = NULL,
243 .config_mac_address = NULL,
244 .config_promiscusity = NULL,
248 printf("Failed to get KNI named %s\n", TEST_KNI_PORT);
254 /* Check with the invalid parameters */
255 if (rte_kni_register_handlers(kni, NULL) == 0) {
/* NOTE(review): "successuflly" is a typo in the runtime string; left
 * as-is here since fixing it changes program output. */
256 printf("Unexpectedly register successuflly "
257 "with NULL ops pointer\n");
260 if (rte_kni_register_handlers(NULL, &ops) == 0) {
261 printf("Unexpectedly register successfully "
262 "to NULL KNI device pointer\n");
266 if (rte_kni_register_handlers(kni, &ops)) {
267 printf("Fail to register ops\n");
271 /* Check registering again after it has been registered */
272 if (rte_kni_register_handlers(kni, &ops) == 0) {
273 printf("Unexpectedly register successfully after "
274 "it has already been registered\n");
279 * Handle the request of setting MTU,
280 * with registered handlers.
/* Poll until the MTU callback fires or the 5 s budget is spent. */
282 for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
283 rte_kni_handle_request(kni);
284 if (kni_pkt_mtu == TEST_KNI_MTU)
286 rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
288 if (i >= TEST_KNI_HANDLE_REQ_COUNT) {
289 printf("MTU has not been set\n");
294 if (rte_kni_unregister_handlers(kni) < 0) {
295 printf("Fail to unregister ops\n");
299 /* Check with invalid parameter */
300 if (rte_kni_unregister_handlers(NULL) == 0) {
305 * Handle the request of setting MTU,
306 * without registered handlers.
/* With handlers unregistered, the request must NOT update kni_pkt_mtu. */
308 for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
309 rte_kni_handle_request(kni);
310 if (kni_pkt_mtu != 0)
312 rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
314 if (kni_pkt_mtu != 0) {
315 printf("MTU shouldn't be set\n");
/* Parent process: trigger kernel MTU requests for the child to handle. */
324 if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
329 if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
/* Reap the child and propagate its pass/fail status. */
333 p_ret = wait(&status);
334 if (!WIFEXITED(status)) {
335 printf("Child process (%d) exit abnormally\n", p_ret);
338 if (WEXITSTATUS(status) != 0) {
339 printf("Child process exit with failure\n");
/* Core KNI processing test for one port: allocate a KNI device bound to
 * port_id, run the multi-process handler test, launch test_kni_loop on all
 * lcores, verify mbuf throughput thresholds, then exercise release,
 * double-release and memzone-reuse paths.  NOTE(review): return type,
 * several declarations, loop/brace lines and error-path returns are elided
 * from this listing. */
348 test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
353 struct rte_kni_conf conf;
354 struct rte_eth_dev_info info;
355 struct rte_kni_ops ops;
356 const struct rte_pci_device *pci_dev;
357 const struct rte_bus *bus = NULL;
362 memset(&conf, 0, sizeof(conf));
363 memset(&info, 0, sizeof(info));
364 memset(&ops, 0, sizeof(ops));
366 rte_eth_dev_info_get(port_id, &info);
/* PCI address/id are only meaningful for PCI-bus devices. */
368 bus = rte_bus_find_by_device(info.device);
369 if (bus && !strcmp(bus->name, "pci")) {
370 pci_dev = RTE_DEV_TO_PCI(info.device);
371 conf.addr = pci_dev->addr;
372 conf.id = pci_dev->id;
374 snprintf(conf.name, sizeof(conf.name), TEST_KNI_PORT);
376 /* core id 1 configured for kernel thread */
379 conf.mbuf_size = MAX_PACKET_SZ;
380 conf.group_id = port_id;
383 ops.port_id = port_id;
385 /* basic test of kni processing */
386 kni = rte_kni_alloc(mp, &conf, &ops);
388 printf("fail to create kni\n");
393 test_kni_processing_flag = 0;
398 * Check multiple processes support on
399 * registering/unregistering handlers.
401 if (test_kni_register_handler_mp() < 0) {
402 printf("fail to check multiple process support\n");
/* Run test_kni_loop on every lcore (including master) and join workers. */
407 rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MASTER);
408 RTE_LCORE_FOREACH_SLAVE(i) {
409 if (rte_eal_wait_lcore(i) < 0) {
415 * Check if the number of mbufs received from kernel space is equal
416 * to that of transmitted to kernel space
418 if (stats.ingress < KNI_NUM_MBUF_THRESHOLD ||
419 stats.egress < KNI_NUM_MBUF_THRESHOLD) {
420 printf("The ingress/egress number should not be "
421 "less than %u\n", (unsigned)KNI_NUM_MBUF_THRESHOLD);
426 if (rte_kni_release(kni) < 0) {
427 printf("fail to release kni\n");
432 /* test of releasing a released kni device */
433 if (rte_kni_release(kni) == 0) {
434 printf("should not release a released kni device\n");
438 /* test of reusing memzone */
439 kni = rte_kni_alloc(mp, &conf, &ops);
441 printf("fail to create kni\n");
445 /* Release the kni for following testing */
446 if (rte_kni_release(kni) < 0) {
447 printf("fail to release kni\n");
453 if (rte_kni_release(kni) < 0) {
454 printf("fail to release kni\n");
/* Top-level KNI autotest entry point body.  Initializes the KNI subsystem,
 * sets up port 0 (configure, rx/tx queues, start, promiscuous), runs the
 * processing test, then probes a series of invalid-parameter cases against
 * the rte_kni API.  NOTE(review): the function signature and many
 * brace/return/goto lines are elided from this listing. */
465 uint16_t nb_ports, port_id;
467 struct rte_mempool *mp;
468 struct rte_kni_conf conf;
469 struct rte_eth_dev_info info;
470 struct rte_kni_ops ops;
471 const struct rte_pci_device *pci_dev;
472 const struct rte_bus *bus;
474 /* Initialize KNI subsystem */
475 rte_kni_init(KNI_TEST_MAX_PORTS);
477 if (test_kni_allocate_lcores() < 0) {
478 printf("No enough lcores for kni processing\n");
482 mp = test_kni_create_mempool();
484 printf("fail to create mempool for kni\n");
488 nb_ports = rte_eth_dev_count_avail();
490 printf("no supported nic port found\n");
494 /* configuring port 0 for the test is enough */
496 ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
498 printf("fail to configure port %d\n", port_id);
502 ret = rte_eth_rx_queue_setup(port_id, 0, NB_RXD, SOCKET, &rx_conf, mp);
504 printf("fail to setup rx queue for port %d\n", port_id);
508 ret = rte_eth_tx_queue_setup(port_id, 0, NB_TXD, SOCKET, &tx_conf);
510 printf("fail to setup tx queue for port %d\n", port_id);
514 ret = rte_eth_dev_start(port_id);
516 printf("fail to start port %d\n", port_id);
519 rte_eth_promiscuous_enable(port_id);
521 /* basic test of kni processing */
522 ret = test_kni_processing(port_id, mp);
526 /* test of allocating KNI with NULL mempool pointer */
527 memset(&info, 0, sizeof(info));
528 memset(&conf, 0, sizeof(conf));
529 memset(&ops, 0, sizeof(ops));
530 rte_eth_dev_info_get(port_id, &info);
532 bus = rte_bus_find_by_device(info.device);
/* PCI address/id only apply to PCI-bus devices. */
535 if (bus && !strcmp(bus->name, "pci")) {
536 pci_dev = RTE_DEV_TO_PCI(info.device);
537 conf.addr = pci_dev->addr;
538 conf.id = pci_dev->id;
540 conf.group_id = port_id;
541 conf.mbuf_size = MAX_PACKET_SZ;
544 ops.port_id = port_id;
545 kni = rte_kni_alloc(NULL, &conf, &ops);
548 printf("unexpectedly creates kni successfully with NULL "
549 "mempool pointer\n");
553 /* test of allocating KNI without configurations */
554 kni = rte_kni_alloc(mp, NULL, NULL);
557 printf("Unexpectedly allocate KNI device successfully "
558 "without configurations\n");
562 /* test of allocating KNI without a name */
563 memset(&conf, 0, sizeof(conf));
564 memset(&info, 0, sizeof(info));
565 memset(&ops, 0, sizeof(ops));
566 rte_eth_dev_info_get(port_id, &info);
568 bus = rte_bus_find_by_device(info.device);
571 if (bus && !strcmp(bus->name, "pci")) {
572 pci_dev = RTE_DEV_TO_PCI(info.device);
573 conf.addr = pci_dev->addr;
574 conf.id = pci_dev->id;
576 conf.group_id = port_id;
577 conf.mbuf_size = MAX_PACKET_SZ;
580 ops.port_id = port_id;
581 kni = rte_kni_alloc(mp, &conf, &ops);
584 printf("Unexpectedly allocate a KNI device successfully "
589 /* test of releasing NULL kni context */
590 ret = rte_kni_release(NULL);
593 printf("unexpectedly release kni successfully\n");
597 /* test of handling request on NULL device pointer */
598 ret = rte_kni_handle_request(NULL);
601 printf("Unexpectedly handle request on NULL device pointer\n");
605 /* test of getting KNI device with pointer to NULL */
606 kni = rte_kni_get(NULL);
609 printf("Unexpectedly get a KNI device with "
610 "NULL name pointer\n");
614 /* test of getting KNI device with a zero-length name string */
615 memset(&conf, 0, sizeof(conf));
616 kni = rte_kni_get(conf.name);
619 printf("Unexpectedly get a KNI device with "
620 "zero length name string\n");
624 /* test of getting KNI device with an invalid (never-used) name string */
625 memset(&conf, 0, sizeof(conf));
626 snprintf(conf.name, sizeof(conf.name), "testing");
627 kni = rte_kni_get(conf.name);
630 printf("Unexpectedly get a KNI device with "
631 "a never used name string\n");
/* Cleanup: stop the port before returning. */
637 rte_eth_dev_stop(port_id);
/* Register this test with the DPDK test framework as "kni_autotest". */
644 REGISTER_TEST_COMMAND(kni_autotest, test_kni);