1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
13 #include <rte_string_fns.h>
14 #include <rte_mempool.h>
15 #include <rte_ethdev.h>
16 #include <rte_bus_pci.h>
17 #include <rte_cycles.h>
21 #define MAX_PACKET_SZ 2048
22 #define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)
23 #define PKT_BURST_SZ 32
24 #define MEMPOOL_CACHE_SZ PKT_BURST_SZ
28 #define KNI_TIMEOUT_MS 5000 /* ms */
30 #define IFCONFIG "/sbin/ifconfig "
31 #define TEST_KNI_PORT "test_kni_port"
32 #define KNI_TEST_MAX_PORTS 4
33 /* The threshold number of mbufs to be transmitted or received. */
34 #define KNI_NUM_MBUF_THRESHOLD 100
/* Last MTU value delivered through the change-MTU callback; 0 means "never
 * set". Written by kni_change_mtu() and polled by the handler-registration
 * test to confirm a kernel request was actually dispatched. */
35 static int kni_pkt_mtu = 0;
/* Mbuf counters shared between lcores running test_kni_loop(); volatile
 * because ingress/egress lcores update them concurrently.
 * NOTE(review): this listing is elided — the struct's closing brace and
 * possibly further members are not visible here. */
37 struct test_kni_stats {
38 volatile uint64_t ingress;
39 volatile uint64_t egress;
/* Static RX/TX queue and port configuration used when setting up the test
 * port. NOTE(review): initializer bodies are elided in this listing. */
42 static const struct rte_eth_rxconf rx_conf = {
51 static const struct rte_eth_txconf tx_conf = {
61 static const struct rte_eth_conf port_conf = {
70 .mq_mode = ETH_DCB_NONE,
/* Default KNI ops used at allocation time: no callbacks registered, so
 * kernel requests are not acted upon unless handlers are registered later. */
74 static struct rte_kni_ops kni_ops = {
76 .config_network_if = NULL,
/* Lcore roles; resolved once in test_kni_allocate_lcores(). */
79 static unsigned lcore_master, lcore_ingress, lcore_egress;
/* The KNI device under test, shared by all lcores in test_kni_loop(). */
80 static struct rte_kni *test_kni_ctx;
81 static struct test_kni_stats stats;
/* Set to 1 by the master lcore to tell the ingress/egress loops to stop. */
83 static volatile uint32_t test_kni_processing_flag;
/* Return the shared "kni_mempool", creating it on first use.
 * NOTE(review): this listing is elided — the guard that skips creation when
 * the lookup succeeds, the remaining pool-create arguments, and the return
 * statement are not visible here; presumably the pool is created only when
 * the lookup returns NULL — confirm against the full source. */
85 static struct rte_mempool *
86 test_kni_create_mempool(void)
88 struct rte_mempool * mp;
90 mp = rte_mempool_lookup("kni_mempool")
92 mp = rte_pktmbuf_pool_create("kni_mempool",
94 MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ,
/* Look up the shared mempool by the same name used in
 * test_kni_create_mempool(); returns NULL when it does not exist yet. */
100 static struct rte_mempool *
101 test_kni_lookup_mempool(void)
103 return rte_mempool_lookup("kni_mempool");
105 /* Callback for request of changing MTU */
/* Records the requested MTU in the file-scope kni_pkt_mtu so the test can
 * later verify the kernel request reached user space.
 * NOTE(review): the return-type line and the final return are elided in
 * this listing. */
107 kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
109 printf("Change MTU of port %d to %u\n", port_id, new_mtu);
110 kni_pkt_mtu = new_mtu;
111 printf("Change MTU of port %d to %i successfully.\n",
112 port_id, kni_pkt_mtu);
116 * This loop fully tests the basic functions of KNI. e.g. transmitting,
117 * receiving to, from kernel space, and kernel requests.
119 * This is the loop to transmit/receive mbufs to/from kernel interface with
120 * supported by KNI kernel module. The ingress lcore will allocate mbufs and
121 * transmit them to kernel space; while the egress lcore will receive the mbufs
122 * from kernel space and free them.
123 * On the master lcore, several commands will be run to check handling the
124 * kernel requests. And it will finally set the flag to exit the KNI
125 * transmitting/receiving to/from the kernel space.
127 * Note: To support this testing, the KNI kernel module needs to be insmodded
128 * in one of its loopback modes.
/* NOTE(review): many interior lines (loop headers, braces, error returns)
 * are elided in this listing; the comments below describe only the visible
 * statements. */
131 test_kni_loop(__rte_unused void *arg)
134 unsigned nb_rx, nb_tx, num, i;
135 const unsigned lcore_id = rte_lcore_id();
136 struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
/* Master lcore: exercise the kernel-request path by toggling the interface
 * with ifconfig, then raise the flag so worker loops terminate. */
138 if (lcore_id == lcore_master) {
139 rte_delay_ms(KNI_TIMEOUT_MS);
140 /* tests of handling kernel request */
141 if (system(IFCONFIG TEST_KNI_PORT" up") == -1)
143 if (system(IFCONFIG TEST_KNI_PORT" mtu 1400") == -1)
145 if (system(IFCONFIG TEST_KNI_PORT" down") == -1)
147 rte_delay_ms(KNI_TIMEOUT_MS);
148 test_kni_processing_flag = 1;
/* Ingress lcore: allocate mbuf bursts and push them toward the kernel. */
149 } else if (lcore_id == lcore_ingress) {
150 struct rte_mempool *mp = test_kni_lookup_mempool();
/* Exit condition checked each iteration (loop construct elided). */
156 if (test_kni_processing_flag)
159 for (nb_rx = 0; nb_rx < PKT_BURST_SZ; nb_rx++) {
160 pkts_burst[nb_rx] = rte_pktmbuf_alloc(mp);
161 if (!pkts_burst[nb_rx])
165 num = rte_kni_tx_burst(test_kni_ctx, pkts_burst,
167 stats.ingress += num;
168 rte_kni_handle_request(test_kni_ctx);
/* Free the tail of the burst that rte_kni_tx_burst() did not accept. */
170 for (i = num; i < nb_rx; i++) {
171 rte_pktmbuf_free(pkts_burst[i]);
/* Egress lcore: drain mbufs looped back from kernel space and free them.
 * NOTE(review): the stats.egress update is in elided lines — confirm
 * against the full source. */
176 } else if (lcore_id == lcore_egress) {
178 if (test_kni_processing_flag)
180 num = rte_kni_rx_burst(test_kni_ctx, pkts_burst,
183 for (nb_tx = 0; nb_tx < num; nb_tx++)
184 rte_pktmbuf_free(pkts_burst[nb_tx]);
/* Assign lcore roles: record the master lcore, then pick worker lcores for
 * ingress and egress duty. Returns 0 when exactly two workers were found,
 * -1 otherwise.
 * NOTE(review): the lines assigning lcore_ingress/lcore_egress and
 * incrementing count are elided in this listing. */
193 test_kni_allocate_lcores(void)
195 unsigned i, count = 0;
197 lcore_master = rte_get_master_lcore();
198 printf("master lcore: %u\n", lcore_master);
199 for (i = 0; i < RTE_MAX_LCORE; i++) {
202 if (rte_lcore_is_enabled(i) && i != lcore_master) {
210 printf("count: %u\n", count);
212 return count == 2 ? 0 : -1;
/* Multi-process test of rte_kni_register_handlers()/unregister_handlers():
 * fork a child that looks up the KNI device and exercises registration
 * (invalid args, double-register, request handling with and without
 * handlers) while the parent triggers MTU-change requests via ifconfig and
 * finally reaps the child's exit status.
 * NOTE(review): returns, braces, the fork() call, variable declarations
 * and the reset of kni_pkt_mtu between the two polling loops are elided in
 * this listing. */
216 test_kni_register_handler_mp(void)
218 #define TEST_KNI_HANDLE_REQ_COUNT 10 /* 5s */
219 #define TEST_KNI_HANDLE_REQ_INTERVAL 500 /* ms */
220 #define TEST_KNI_MTU 1450
221 #define TEST_KNI_MTU_STR " 1450"
226 printf("Failed to fork a process\n");
/* Child process branch. */
228 } else if (pid == 0) {
230 struct rte_kni *kni = rte_kni_get(TEST_KNI_PORT);
231 struct rte_kni_ops ops = {
232 .change_mtu = kni_change_mtu,
233 .config_network_if = NULL,
237 printf("Failed to get KNI named %s\n", TEST_KNI_PORT);
243 /* Check with the invalid parameters */
244 if (rte_kni_register_handlers(kni, NULL) == 0) {
/* NOTE(review): "successuflly" typo below is in a runtime string inside an
 * elided block; left byte-identical here. */
245 printf("Unexpectedly register successuflly "
246 "with NULL ops pointer\n");
249 if (rte_kni_register_handlers(NULL, &ops) == 0) {
250 printf("Unexpectedly register successfully "
251 "to NULL KNI device pointer\n");
/* A valid registration must succeed (non-zero return is failure). */
255 if (rte_kni_register_handlers(kni, &ops)) {
256 printf("Fail to register ops\n");
260 /* Check registering again after it has been registered */
261 if (rte_kni_register_handlers(kni, &ops) == 0) {
262 printf("Unexpectedly register successfully after "
263 "it has already been registered\n");
268 * Handle the request of setting MTU,
269 * with registered handlers.
/* Poll until the callback recorded the MTU or the attempts run out. */
271 for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
272 rte_kni_handle_request(kni);
273 if (kni_pkt_mtu == TEST_KNI_MTU)
275 rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
277 if (i >= TEST_KNI_HANDLE_REQ_COUNT) {
278 printf("MTU has not been set\n");
283 if (rte_kni_unregister_handlers(kni) < 0) {
284 printf("Fail to unregister ops\n");
288 /* Check with invalid parameter */
289 if (rte_kni_unregister_handlers(NULL) == 0) {
294 * Handle the request of setting MTU,
295 * without registered handlers.
/* With handlers removed, the callback must never fire. NOTE(review):
 * kni_pkt_mtu is presumably reset to 0 in elided lines before this loop —
 * confirm against the full source. */
297 for (i = 0; i < TEST_KNI_HANDLE_REQ_COUNT; i++) {
298 rte_kni_handle_request(kni);
299 if (kni_pkt_mtu != 0)
301 rte_delay_ms(TEST_KNI_HANDLE_REQ_INTERVAL);
303 if (kni_pkt_mtu != 0) {
304 printf("MTU shouldn't be set\n");
/* Parent process branch: trigger MTU-change requests for the child to
 * observe, then wait for the child and inspect its exit status. */
313 if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
318 if (system(IFCONFIG TEST_KNI_PORT " mtu" TEST_KNI_MTU_STR)
322 p_ret = wait(&status);
323 if (!WIFEXITED(status)) {
324 printf("Child process (%d) exit abnormally\n", p_ret);
327 if (WEXITSTATUS(status) != 0) {
328 printf("Child process exit with failure\n");
/* Core KNI processing test for one ethdev port: allocate a KNI device named
 * TEST_KNI_PORT, run test_kni_loop() on all lcores, verify enough mbufs
 * flowed in both directions, then exercise release / double-release /
 * re-allocation (memzone reuse) paths.
 * NOTE(review): return statements, error-path gotos and some assignments
 * (e.g. conf.core_id, conf.force_bind, ops.change_mtu) are elided in this
 * listing. */
337 test_kni_processing(uint16_t port_id, struct rte_mempool *mp)
342 struct rte_kni_conf conf;
343 struct rte_eth_dev_info info;
344 struct rte_kni_ops ops;
349 memset(&conf, 0, sizeof(conf));
350 memset(&info, 0, sizeof(info));
351 memset(&ops, 0, sizeof(ops));
/* Mirror the port's PCI identity into the KNI config. */
353 rte_eth_dev_info_get(port_id, &info);
354 conf.addr = info.pci_dev->addr;
355 conf.id = info.pci_dev->id;
356 snprintf(conf.name, sizeof(conf.name), TEST_KNI_PORT);
358 /* core id 1 configured for kernel thread */
361 conf.mbuf_size = MAX_PACKET_SZ;
362 conf.group_id = port_id;
365 ops.port_id = port_id;
367 /* basic test of kni processing */
368 kni = rte_kni_alloc(mp, &conf, &ops);
370 printf("fail to create kni\n");
375 test_kni_processing_flag = 0;
380 * Check multiple processes support on
381 * registerring/unregisterring handlers.
383 if (test_kni_register_handler_mp() < 0) {
384 printf("fail to check multiple process support\n");
/* Launch test_kni_loop() on every lcore (master included) and wait for
 * the worker lcores to finish. */
389 rte_eal_mp_remote_launch(test_kni_loop, NULL, CALL_MASTER);
390 RTE_LCORE_FOREACH_SLAVE(i) {
391 if (rte_eal_wait_lcore(i) < 0) {
397 * Check if the number of mbufs received from kernel space is equal
398 * to that of transmitted to kernel space
400 if (stats.ingress < KNI_NUM_MBUF_THRESHOLD ||
401 stats.egress < KNI_NUM_MBUF_THRESHOLD) {
402 printf("The ingress/egress number should not be "
403 "less than %u\n", (unsigned)KNI_NUM_MBUF_THRESHOLD);
408 if (rte_kni_release(kni) < 0) {
409 printf("fail to release kni\n");
414 /* test of releasing a released kni device */
415 if (rte_kni_release(kni) == 0) {
416 printf("should not release a released kni device\n");
420 /* test of reusing memzone */
421 kni = rte_kni_alloc(mp, &conf, &ops);
423 printf("fail to create kni\n");
427 /* Release the kni for following testing */
428 if (rte_kni_release(kni) < 0) {
429 printf("fail to release kni\n");
/* NOTE(review): this third release appears on an elided control path
 * (likely an error-cleanup label) — confirm against the full source. */
435 if (rte_kni_release(kni) < 0) {
436 printf("fail to release kni\n");
/* Body of the kni_autotest entry point: initialize the KNI subsystem and
 * one ethdev port, run test_kni_processing(), then probe the rte_kni_*
 * APIs with invalid arguments (NULL mempool, NULL/empty config, NULL
 * handles, unknown names).
 * NOTE(review): the function's signature line, return statements,
 * error-path gotos and several guard conditions are elided in this
 * listing. */
447 uint16_t nb_ports, port_id;
449 struct rte_mempool *mp;
450 struct rte_kni_conf conf;
451 struct rte_eth_dev_info info;
452 struct rte_kni_ops ops;
454 /* Initialize KNI subsytem */
455 rte_kni_init(KNI_TEST_MAX_PORTS);
457 if (test_kni_allocate_lcores() < 0) {
458 printf("No enough lcores for kni processing\n");
462 mp = test_kni_create_mempool();
464 printf("fail to create mempool for kni\n");
468 nb_ports = rte_eth_dev_count();
470 printf("no supported nic port found\n");
474 /* configuring port 0 for the test is enough */
476 ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
478 printf("fail to configure port %d\n", port_id);
482 ret = rte_eth_rx_queue_setup(port_id, 0, NB_RXD, SOCKET, &rx_conf, mp);
484 printf("fail to setup rx queue for port %d\n", port_id);
488 ret = rte_eth_tx_queue_setup(port_id, 0, NB_TXD, SOCKET, &tx_conf);
490 printf("fail to setup tx queue for port %d\n", port_id);
494 ret = rte_eth_dev_start(port_id);
496 printf("fail to start port %d\n", port_id);
499 rte_eth_promiscuous_enable(port_id);
501 /* basic test of kni processing */
502 ret = test_kni_processing(port_id, mp);
506 /* test of allocating KNI with NULL mempool pointer */
507 memset(&info, 0, sizeof(info));
508 memset(&conf, 0, sizeof(conf));
509 memset(&ops, 0, sizeof(ops));
510 rte_eth_dev_info_get(port_id, &info);
511 conf.addr = info.pci_dev->addr;
512 conf.id = info.pci_dev->id;
513 conf.group_id = port_id;
514 conf.mbuf_size = MAX_PACKET_SZ;
517 ops.port_id = port_id;
/* Expect failure: NULL mempool must be rejected. */
518 kni = rte_kni_alloc(NULL, &conf, &ops);
521 printf("unexpectedly creates kni successfully with NULL "
522 "mempool pointer\n");
526 /* test of allocating KNI without configurations */
527 kni = rte_kni_alloc(mp, NULL, NULL);
530 printf("Unexpectedly allocate KNI device successfully "
531 "without configurations\n");
535 /* test of allocating KNI without a name */
536 memset(&conf, 0, sizeof(conf));
537 memset(&info, 0, sizeof(info));
538 memset(&ops, 0, sizeof(ops));
539 rte_eth_dev_info_get(port_id, &info);
540 conf.addr = info.pci_dev->addr;
541 conf.id = info.pci_dev->id;
542 conf.group_id = port_id;
543 conf.mbuf_size = MAX_PACKET_SZ;
546 ops.port_id = port_id;
/* conf.name was zeroed above, so this allocation must also fail. */
547 kni = rte_kni_alloc(mp, &conf, &ops);
550 printf("Unexpectedly allocate a KNI device successfully "
555 /* test of releasing NULL kni context */
556 ret = rte_kni_release(NULL);
559 printf("unexpectedly release kni successfully\n");
563 /* test of handling request on NULL device pointer */
564 ret = rte_kni_handle_request(NULL);
567 printf("Unexpectedly handle request on NULL device pointer\n");
571 /* test of getting KNI device with pointer to NULL */
572 kni = rte_kni_get(NULL);
575 printf("Unexpectedly get a KNI device with "
576 "NULL name pointer\n");
580 /* test of getting KNI device with an zero length name string */
581 memset(&conf, 0, sizeof(conf));
582 kni = rte_kni_get(conf.name);
585 printf("Unexpectedly get a KNI device with "
586 "zero length name string\n");
590 /* test of getting KNI device with an invalid string name */
591 memset(&conf, 0, sizeof(conf));
592 snprintf(conf.name, sizeof(conf.name), "testing");
593 kni = rte_kni_get(conf.name);
596 printf("Unexpectedly get a KNI device with "
597 "a never used name string\n");
/* Cleanup path: stop the port before returning. */
603 rte_eth_dev_stop(port_id);
608 REGISTER_TEST_COMMAND(kni_autotest, test_kni);