kni: uninitialize global variables
[dpdk.git] / lib / librte_eal / linuxapp / kni / kni_misc.c
1 /*-
2  * GPL LICENSE SUMMARY
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *
6  *   This program is free software; you can redistribute it and/or modify
7  *   it under the terms of version 2 of the GNU General Public License as
8  *   published by the Free Software Foundation.
9  *
10  *   This program is distributed in the hope that it will be useful, but
11  *   WITHOUT ANY WARRANTY; without even the implied warranty of
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *   General Public License for more details.
14  *
15  *   You should have received a copy of the GNU General Public License
16  *   along with this program; if not, write to the Free Software
17  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18  *   The full GNU General Public License is included in this distribution
19  *   in the file called LICENSE.GPL.
20  *
21  *   Contact Information:
22  *   Intel Corporation
23  */
24
25 #include <linux/version.h>
26 #include <linux/module.h>
27 #include <linux/miscdevice.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/pci.h>
31 #include <linux/kthread.h>
32 #include <linux/rwsem.h>
33 #include <linux/mutex.h>
34 #include <linux/nsproxy.h>
35 #include <net/net_namespace.h>
36 #include <net/netns/generic.h>
37
38 #include <exec-env/rte_kni_common.h>
39
40 #include "compat.h"
41 #include "kni_dev.h"
42
43 MODULE_LICENSE("Dual BSD/GPL");
44 MODULE_AUTHOR("Intel Corporation");
45 MODULE_DESCRIPTION("Kernel Module for managing kni devices");
46
47 #define KNI_RX_LOOP_NUM 1000
48
49 #define KNI_MAX_DEVICES 32
50
51 extern const struct pci_device_id ixgbe_pci_tbl[];
52 extern const struct pci_device_id igb_pci_tbl[];
53
54 static int kni_open(struct inode *inode, struct file *file);
55 static int kni_release(struct inode *inode, struct file *file);
56 static int kni_ioctl(struct inode *inode, unsigned int ioctl_num,
57                                         unsigned long ioctl_param);
58 static int kni_compat_ioctl(struct inode *inode, unsigned int ioctl_num,
59                                                 unsigned long ioctl_param);
60 static int kni_dev_remove(struct kni_dev *dev);
61
62 static int __init kni_parse_kthread_mode(void);
63
64 /* KNI processing for single kernel thread mode */
65 static int kni_thread_single(void *unused);
66 /* KNI processing for multiple kernel thread mode */
67 static int kni_thread_multiple(void *param);
68
/*
 * File operations for /dev/kni.
 *
 * NOTE(review): kni_ioctl() and kni_compat_ioctl() are declared with a
 * 'struct inode *' first parameter, but .unlocked_ioctl/.compat_ioctl
 * expect a handler taking 'struct file *'; the (void *) casts silence
 * the mismatch.  Calling through an incompatible function-pointer type
 * is undefined behavior per the C standard -- it works here only
 * because the handlers never dereference the first argument.  The
 * signatures should be changed to 'struct file *' and the casts removed.
 */
static struct file_operations kni_fops = {
        .owner = THIS_MODULE,
        .open = kni_open,
        .release = kni_release,
        .unlocked_ioctl = (void *)kni_ioctl,
        .compat_ioctl = (void *)kni_compat_ioctl,
};
76
/*
 * Misc character device (/dev/kni) with a dynamically assigned minor;
 * all userspace control traffic enters through kni_fops.
 */
static struct miscdevice kni_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = KNI_DEVICE,
        .fops = &kni_fops,
};
82
/* loopback mode (module parameter, consumed by kni_net_config_lo_mode()) */
static char *lo_mode;

/* Kernel thread mode (module parameter, parsed by kni_parse_kthread_mode()) */
static char *kthread_mode;
static unsigned int multiple_kthread_on; /* 1 iff kthread_mode="multiple" */

#define KNI_DEV_IN_USE_BIT_NUM 0 /* Bit number for device in use */

/* net_generic() key for the per-netns struct kni_net */
static int kni_net_id;
93
/*
 * Per network-namespace KNI state, retrieved with
 * net_generic(net, kni_net_id).
 */
struct kni_net {
        unsigned long device_in_use; /* device in use flag */
        struct mutex kni_kthread_lock;     /* serializes start/stop of kni_kthread */
        struct task_struct *kni_kthread;   /* single-mode RX thread; NULL when not running */
        struct rw_semaphore kni_list_lock; /* protects kni_list_head */
        struct list_head kni_list_head;    /* all kni_dev instances in this netns */
};
101
/*
 * Per-netns constructor for struct kni_net.
 *
 * With HAVE_SIMPLIFIED_PERNET_OPERATIONS the core allocates the area
 * itself (via .size in kni_net_ops), so we merely zero it; otherwise we
 * kzalloc() it and attach it with net_assign_generic(), freeing it again
 * if the attach fails.
 *
 * Returns 0 on success or a negative errno.
 */
static int __net_init kni_init_net(struct net *net)
{
#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
        struct kni_net *knet = net_generic(net, kni_net_id);
        memset(knet, 0, sizeof(*knet));
#else
        struct kni_net *knet;
        int ret;

        knet = kzalloc(sizeof(struct kni_net), GFP_KERNEL);
        if (!knet) {
                ret = -ENOMEM;
                return ret;
        }
#endif

        /* Clear the bit of device in use */
        clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);

        mutex_init(&knet->kni_kthread_lock);

        init_rwsem(&knet->kni_list_lock);
        INIT_LIST_HEAD(&knet->kni_list_head);

#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
        return 0;
#else
        ret = net_assign_generic(net, kni_net_id, knet);
        if (ret < 0)
                kfree(knet);

        return ret;
#endif
}
136
/*
 * Per-netns destructor: tear down the kthread mutex and, when we
 * allocated the kni_net ourselves (no HAVE_SIMPLIFIED_PERNET_OPERATIONS),
 * free it; in the simplified case the core owns the allocation.
 */
static void __net_exit kni_exit_net(struct net *net)
{
        struct kni_net *knet = net_generic(net, kni_net_id);
        mutex_destroy(&knet->kni_kthread_lock);
#ifndef HAVE_SIMPLIFIED_PERNET_OPERATIONS
        kfree(knet);
#endif
}
145
/*
 * Pernet hooks.  With HAVE_SIMPLIFIED_PERNET_OPERATIONS the core
 * allocates .size bytes per namespace and records the slot in
 * kni_net_id; otherwise the id comes from register_pernet_gen_subsys().
 */
static struct pernet_operations kni_net_ops = {
        .init = kni_init_net,
        .exit = kni_exit_net,
#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
        .id   = &kni_net_id,
        .size = sizeof(struct kni_net),
#endif
};
154
155 static int __init
156 kni_init(void)
157 {
158         int rc;
159
160         KNI_PRINT("######## DPDK kni module loading ########\n");
161
162         if (kni_parse_kthread_mode() < 0) {
163                 KNI_ERR("Invalid parameter for kthread_mode\n");
164                 return -EINVAL;
165         }
166
167         if (multiple_kthread_on == 0)
168                 KNI_PRINT("Single kernel thread for all KNI devices\n");
169         else
170                 KNI_PRINT("Multiple kernel thread mode enabled\n");
171
172 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
173         rc = register_pernet_subsys(&kni_net_ops);
174 #else
175         rc = register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
176 #endif
177         if (rc)
178                 return -EPERM;
179
180         rc = misc_register(&kni_misc);
181         if (rc != 0) {
182                 KNI_ERR("Misc registration failed\n");
183                 goto out;
184         }
185
186         /* Configure the lo mode according to the input parameter */
187         kni_net_config_lo_mode(lo_mode);
188
189         KNI_PRINT("######## DPDK kni module loaded  ########\n");
190
191         return 0;
192
193 out:
194 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
195         unregister_pernet_subsys(&kni_net_ops);
196 #else
197         unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
198 #endif
199         return rc;
200 }
201
/*
 * Module unload: deregister /dev/kni first so no new opens can race
 * with teardown, then unregister the per-netns state.
 */
static void __exit
kni_exit(void)
{
        misc_deregister(&kni_misc);
#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
        unregister_pernet_subsys(&kni_net_ops);
#else
        unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
#endif
        KNI_PRINT("####### DPDK kni module unloaded  #######\n");
}
213
214 static int __init
215 kni_parse_kthread_mode(void)
216 {
217         if (!kthread_mode)
218                 return 0;
219
220         if (strcmp(kthread_mode, "single") == 0)
221                 return 0;
222         else if (strcmp(kthread_mode, "multiple") == 0)
223                 multiple_kthread_on = 1;
224         else
225                 return -1;
226
227         return 0;
228 }
229
/*
 * open() handler for /dev/kni: only one opener is allowed per network
 * namespace, enforced atomically via the per-netns device_in_use bit.
 * Takes a reference on the opener's netns (get_net) and stashes it in
 * file->private_data; kni_release() drops it.
 */
static int
kni_open(struct inode *inode, struct file *file)
{
        struct net *net = current->nsproxy->net_ns;
        struct kni_net *knet = net_generic(net, kni_net_id);

        /* kni device can be opened by one user only per netns */
        if (test_and_set_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use))
                return -EBUSY;

        file->private_data = get_net(net);
        KNI_PRINT("/dev/kni opened\n");

        return 0;
}
245
/*
 * release() handler for /dev/kni: tear down every KNI device created
 * through this fd's netns, stop the RX kernel thread(s), clear the
 * in-use bit so the device can be opened again, and drop the netns
 * reference taken in kni_open().
 *
 * Ordering matters: the single-mode thread is stopped before the list
 * walk (it reads the list under kni_list_lock); per-device threads are
 * stopped inside the walk while holding the list lock for write.
 */
static int
kni_release(struct inode *inode, struct file *file)
{
        struct net *net = file->private_data;
        struct kni_net *knet = net_generic(net, kni_net_id);
        struct kni_dev *dev, *n;

        /* Stop kernel thread for single mode */
        if (multiple_kthread_on == 0) {
                mutex_lock(&knet->kni_kthread_lock);
                /* Stop kernel thread */
                if (knet->kni_kthread != NULL) {
                        kthread_stop(knet->kni_kthread);
                        knet->kni_kthread = NULL;
                }
                mutex_unlock(&knet->kni_kthread_lock);
        }

        down_write(&knet->kni_list_lock);
        list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
                /* Stop kernel thread for multiple mode */
                if (multiple_kthread_on && dev->pthread != NULL) {
                        kthread_stop(dev->pthread);
                        dev->pthread = NULL;
                }

#ifdef RTE_KNI_VHOST
                kni_vhost_backend_release(dev);
#endif
                /* kni_dev_remove() frees the netdev (and thus 'dev' itself),
                 * so unlink first uses only the still-valid list pointers. */
                kni_dev_remove(dev);
                list_del(&dev->list);
        }
        up_write(&knet->kni_list_lock);

        /* Clear the bit of device in use */
        clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);

        put_net(net);
        KNI_PRINT("/dev/kni closed\n");

        return 0;
}
288
/*
 * RX loop for single kernel-thread mode: one thread services all KNI
 * devices of a netns.  'data' is the per-netns struct kni_net.  Each
 * outer iteration holds kni_list_lock for read across KNI_RX_LOOP_NUM
 * passes over the device list, then (when RTE_KNI_PREEMPT_DEFAULT is
 * set) sleeps KNI_KTHREAD_RESCHEDULE_INTERVAL usecs before rechecking
 * kthread_should_stop().
 */
static int
kni_thread_single(void *data)
{
        struct kni_net *knet = data;
        int j;
        struct kni_dev *dev;

        while (!kthread_should_stop()) {
                down_read(&knet->kni_list_lock);
                for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
                        list_for_each_entry(dev, &knet->kni_list_head, list) {
#ifdef RTE_KNI_VHOST
                                kni_chk_vhost_rx(dev);
#else
                                kni_net_rx(dev);
#endif
                                kni_net_poll_resp(dev);
                        }
                }
                up_read(&knet->kni_list_lock);
#ifdef RTE_KNI_PREEMPT_DEFAULT
                /* reschedule out for a while */
                schedule_timeout_interruptible(usecs_to_jiffies( \
                                KNI_KTHREAD_RESCHEDULE_INTERVAL));
#endif
        }

        return 0;
}
318
/*
 * RX loop for multiple kernel-thread mode: each KNI device has its own
 * thread, so no list lock is needed.  'param' is the device's struct
 * kni_dev.  Structure mirrors kni_thread_single(): KNI_RX_LOOP_NUM
 * polls, then an optional interruptible sleep before checking
 * kthread_should_stop() again.
 */
static int
kni_thread_multiple(void *param)
{
        int j;
        struct kni_dev *dev = (struct kni_dev *)param;

        while (!kthread_should_stop()) {
                for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
#ifdef RTE_KNI_VHOST
                        kni_chk_vhost_rx(dev);
#else
                        kni_net_rx(dev);
#endif
                        kni_net_poll_resp(dev);
                }
#ifdef RTE_KNI_PREEMPT_DEFAULT
                schedule_timeout_interruptible(usecs_to_jiffies( \
                                KNI_KTHREAD_RESCHEDULE_INTERVAL));
#endif
        }

        return 0;
}
342
/*
 * Release the resources behind a KNI device: hand the PCI function back
 * to the matching ethtool-support driver (ixgbe/igb), then unregister
 * and free the net_device.
 *
 * NOTE(review): 'dev' lives inside the netdev's private area (it was
 * obtained via netdev_priv() in kni_ioctl_create), so free_netdev()
 * frees 'dev' itself -- callers must not dereference it afterwards.
 *
 * Returns 0, or -ENODEV when 'dev' is NULL.
 */
static int
kni_dev_remove(struct kni_dev *dev)
{
        if (!dev)
                return -ENODEV;

        if (dev->pci_dev) {
                if (pci_match_id(ixgbe_pci_tbl, dev->pci_dev))
                        ixgbe_kni_remove(dev->pci_dev);
                else if (pci_match_id(igb_pci_tbl, dev->pci_dev))
                        igb_kni_remove(dev->pci_dev);
        }

        if (dev->net_dev) {
                unregister_netdev(dev->net_dev);
                free_netdev(dev->net_dev);
        }

        return 0;
}
363
364 static int
365 kni_check_param(struct kni_dev *kni, struct rte_kni_device_info *dev)
366 {
367         if (!kni || !dev)
368                 return -1;
369
370         /* Check if network name has been used */
371         if (!strncmp(kni->name, dev->name, RTE_KNI_NAMESIZE)) {
372                 KNI_ERR("KNI name %s duplicated\n", dev->name);
373                 return -1;
374         }
375
376         return 0;
377 }
378
/*
 * Start the RX kernel thread for a freshly created device.
 *
 * Multiple mode: create a dedicated "kni_<name>" thread for this
 * device, optionally bind it to kni->core_id, then wake it.
 * Single mode: create the shared "kni_single" thread the first time
 * only, under kni_kthread_lock so concurrent creators cannot race.
 *
 * On kthread_create() failure the device is torn down via
 * kni_dev_remove() and -ECANCELED is returned (after dropping the
 * mutex in the single-mode path); otherwise returns 0.
 */
static int
kni_run_thread(struct kni_net *knet, struct kni_dev *kni, uint8_t force_bind)
{
        /**
         * Create a new kernel thread for multiple mode, set its core affinity,
         * and finally wake it up.
         */
        if (multiple_kthread_on) {
                kni->pthread = kthread_create(kni_thread_multiple,
                        (void *)kni, "kni_%s", kni->name);
                if (IS_ERR(kni->pthread)) {
                        kni_dev_remove(kni);
                        return -ECANCELED;
                }

                if (force_bind)
                        kthread_bind(kni->pthread, kni->core_id);
                wake_up_process(kni->pthread);
        } else {
                mutex_lock(&knet->kni_kthread_lock);

                if (knet->kni_kthread == NULL) {
                        knet->kni_kthread = kthread_create(kni_thread_single,
                                (void *)knet, "kni_single");
                        if (IS_ERR(knet->kni_kthread)) {
                                mutex_unlock(&knet->kni_kthread_lock);
                                kni_dev_remove(kni);
                                return -ECANCELED;
                        }

                        /* First creator's core wins the binding. */
                        if (force_bind)
                                kthread_bind(knet->kni_kthread, kni->core_id);
                        wake_up_process(knet->kni_kthread);
                }

                mutex_unlock(&knet->kni_kthread_lock);
        }

        return 0;
}
419
/*
 * RTE_KNI_IOCTL_CREATE handler: build a new KNI net_device from the
 * rte_kni_device_info passed by userspace.
 *
 * Steps: copy and validate the request, reject duplicate names,
 * allocate the netdev (struct kni_dev is its private area), translate
 * the userspace ring/queue physical addresses with phys_to_virt(),
 * optionally attach the backing PCI device for ethtool support,
 * register the netdev, start the RX thread(s), and finally link the
 * device into the per-netns list.
 *
 * Returns 0 on success or a negative errno.
 */
static int
kni_ioctl_create(struct net *net,
                unsigned int ioctl_num, unsigned long ioctl_param)
{
        struct kni_net *knet = net_generic(net, kni_net_id);
        int ret;
        struct rte_kni_device_info dev_info;
        struct pci_dev *pci = NULL;
        struct pci_dev *found_pci = NULL;
        struct net_device *net_dev = NULL;
        struct net_device *lad_dev = NULL;
        struct kni_dev *kni, *dev, *n;

        printk(KERN_INFO "KNI: Creating kni...\n");
        /* Check the buffer size, to avoid warning */
        if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
                return -EINVAL;

        /* Copy kni info from user space */
        /* NOTE(review): ioctl_param should be cast to (void __user *)
         * for sparse checking -- the copy itself is correct. */
        ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
        if (ret) {
                KNI_ERR("copy_from_user in kni_ioctl_create");
                return -EIO;
        }

        /**
         * Check if the cpu core id is valid for binding.
         */
        if (dev_info.force_bind && !cpu_online(dev_info.core_id)) {
                KNI_ERR("cpu %u is not online\n", dev_info.core_id);
                return -EINVAL;
        }

        /* Check if it has been created */
        down_read(&knet->kni_list_lock);
        list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
                if (kni_check_param(dev, &dev_info) < 0) {
                        up_read(&knet->kni_list_lock);
                        return -EINVAL;
                }
        }
        up_read(&knet->kni_list_lock);

        /* struct kni_dev lives in the netdev's private area */
        net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
#ifdef NET_NAME_UNKNOWN
                                                        NET_NAME_UNKNOWN,
#endif
                                                        kni_net_init);
        if (net_dev == NULL) {
                KNI_ERR("error allocating device \"%s\"\n", dev_info.name);
                return -EBUSY;
        }

        dev_net_set(net_dev, net);

        kni = netdev_priv(net_dev);

        kni->net_dev = net_dev;
        kni->group_id = dev_info.group_id;
        kni->core_id = dev_info.core_id;
        /* NOTE(review): strncpy does not guarantee NUL termination when
         * dev_info.name fills RTE_KNI_NAMESIZE -- verify the source is
         * always terminated by the userspace library. */
        strncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);

        /* Translate user space info into kernel space info */
        kni->tx_q = phys_to_virt(dev_info.tx_phys);
        kni->rx_q = phys_to_virt(dev_info.rx_phys);
        kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
        kni->free_q = phys_to_virt(dev_info.free_phys);

        kni->req_q = phys_to_virt(dev_info.req_phys);
        kni->resp_q = phys_to_virt(dev_info.resp_phys);
        kni->sync_va = dev_info.sync_va;
        kni->sync_kva = phys_to_virt(dev_info.sync_phys);

#ifdef RTE_KNI_VHOST
        kni->vhost_queue = NULL;
        kni->vq_status = BE_STOP;
#endif
        kni->mbuf_size = dev_info.mbuf_size;

        KNI_PRINT("tx_phys:      0x%016llx, tx_q addr:      0x%p\n",
                (unsigned long long) dev_info.tx_phys, kni->tx_q);
        KNI_PRINT("rx_phys:      0x%016llx, rx_q addr:      0x%p\n",
                (unsigned long long) dev_info.rx_phys, kni->rx_q);
        KNI_PRINT("alloc_phys:   0x%016llx, alloc_q addr:   0x%p\n",
                (unsigned long long) dev_info.alloc_phys, kni->alloc_q);
        KNI_PRINT("free_phys:    0x%016llx, free_q addr:    0x%p\n",
                (unsigned long long) dev_info.free_phys, kni->free_q);
        KNI_PRINT("req_phys:     0x%016llx, req_q addr:     0x%p\n",
                (unsigned long long) dev_info.req_phys, kni->req_q);
        KNI_PRINT("resp_phys:    0x%016llx, resp_q addr:    0x%p\n",
                (unsigned long long) dev_info.resp_phys, kni->resp_q);
        KNI_PRINT("mbuf_size:    %u\n", kni->mbuf_size);

        KNI_DBG("PCI: %02x:%02x.%02x %04x:%04x\n",
                                        dev_info.bus,
                                        dev_info.devid,
                                        dev_info.function,
                                        dev_info.vendor_id,
                                        dev_info.device_id);

        pci = pci_get_device(dev_info.vendor_id, dev_info.device_id, NULL);

        /* Support Ethtool: walk matching vendor/device ids looking for
         * the exact bus/slot/function named in the request. */
        while (pci) {
                KNI_PRINT("pci_bus: %02x:%02x:%02x \n",
                                        pci->bus->number,
                                        PCI_SLOT(pci->devfn),
                                        PCI_FUNC(pci->devfn));

                if ((pci->bus->number == dev_info.bus) &&
                        (PCI_SLOT(pci->devfn) == dev_info.devid) &&
                        (PCI_FUNC(pci->devfn) == dev_info.function)) {
                        found_pci = pci;

                        if (pci_match_id(ixgbe_pci_tbl, found_pci))
                                ret = ixgbe_kni_probe(found_pci, &lad_dev);
                        else if (pci_match_id(igb_pci_tbl, found_pci))
                                ret = igb_kni_probe(found_pci, &lad_dev);
                        else
                                ret = -1;

                        KNI_DBG("PCI found: pci=0x%p, lad_dev=0x%p\n",
                                                        pci, lad_dev);
                        if (ret == 0) {
                                kni->lad_dev = lad_dev;
                                kni_set_ethtool_ops(kni->net_dev);
                        } else {
                                KNI_ERR("Device not supported by ethtool");
                                kni->lad_dev = NULL;
                        }

                        kni->pci_dev = found_pci;
                        kni->device_id = dev_info.device_id;
                        break;
                }
                pci = pci_get_device(dev_info.vendor_id,
                                dev_info.device_id, pci);
        }
        /* Drop the reference held on the last pci_get_device() result */
        if (pci)
                pci_dev_put(pci);

        if (kni->lad_dev)
                memcpy(net_dev->dev_addr, kni->lad_dev->dev_addr, ETH_ALEN);
        else
                /*
                 * Generate random mac address. eth_random_addr() is the newer
                 * version of generating mac address in linux kernel.
                 */
                random_ether_addr(net_dev->dev_addr);

        ret = register_netdev(net_dev);
        if (ret) {
                KNI_ERR("error %i registering device \"%s\"\n",
                                        ret, dev_info.name);
                /* Clear net_dev first so kni_dev_remove() does not
                 * unregister/free it; we free it ourselves below. */
                kni->net_dev = NULL;
                kni_dev_remove(kni);
                free_netdev(net_dev);
                return -ENODEV;
        }

#ifdef RTE_KNI_VHOST
        kni_vhost_init(kni);
#endif

        /* kni_run_thread() tears the device down itself on failure */
        ret = kni_run_thread(knet, kni, dev_info.force_bind);
        if (ret != 0)
                return ret;

        down_write(&knet->kni_list_lock);
        list_add(&kni->list, &knet->kni_list_head);
        up_write(&knet->kni_list_lock);

        return 0;
}
594
/*
 * RTE_KNI_IOCTL_RELEASE handler: destroy the KNI device whose name
 * matches the request.  Stops the device's dedicated RX thread (in
 * multiple mode), releases the device, and unlinks it from the
 * per-netns list under kni_list_lock held for write.
 *
 * Returns 0 on success, -EINVAL when the name is empty or not found,
 * -EIO when the request cannot be copied from userspace.
 */
static int
kni_ioctl_release(struct net *net,
                unsigned int ioctl_num, unsigned long ioctl_param)
{
        struct kni_net *knet = net_generic(net, kni_net_id);
        int ret = -EINVAL;
        struct kni_dev *dev, *n;
        struct rte_kni_device_info dev_info;

        if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
                        return -EINVAL;

        ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
        if (ret) {
                KNI_ERR("copy_from_user in kni_ioctl_release");
                return -EIO;
        }

        /* Release the network device according to its name */
        if (strlen(dev_info.name) == 0)
                return ret;

        down_write(&knet->kni_list_lock);
        list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
                if (strncmp(dev->name, dev_info.name, RTE_KNI_NAMESIZE) != 0)
                        continue;

                if (multiple_kthread_on && dev->pthread != NULL) {
                        kthread_stop(dev->pthread);
                        dev->pthread = NULL;
                }

#ifdef RTE_KNI_VHOST
                kni_vhost_backend_release(dev);
#endif
                /* kni_dev_remove() frees 'dev'; unlink relies only on
                 * the (still valid) embedded list pointers. */
                kni_dev_remove(dev);
                list_del(&dev->list);
                ret = 0;
                break;
        }
        up_write(&knet->kni_list_lock);
        printk(KERN_INFO "KNI: %s release kni named %s\n",
                (ret == 0 ? "Successfully" : "Unsuccessfully"), dev_info.name);

        return ret;
}
641
642 static int
643 kni_ioctl(struct inode *inode,
644         unsigned int ioctl_num,
645         unsigned long ioctl_param)
646 {
647         int ret = -EINVAL;
648         struct net *net = current->nsproxy->net_ns;
649
650         KNI_DBG("IOCTL num=0x%0x param=0x%0lx\n", ioctl_num, ioctl_param);
651
652         /*
653          * Switch according to the ioctl called
654          */
655         switch (_IOC_NR(ioctl_num)) {
656         case _IOC_NR(RTE_KNI_IOCTL_TEST):
657                 /* For test only, not used */
658                 break;
659         case _IOC_NR(RTE_KNI_IOCTL_CREATE):
660                 ret = kni_ioctl_create(net, ioctl_num, ioctl_param);
661                 break;
662         case _IOC_NR(RTE_KNI_IOCTL_RELEASE):
663                 ret = kni_ioctl_release(net, ioctl_num, ioctl_param);
664                 break;
665         default:
666                 KNI_DBG("IOCTL default\n");
667                 break;
668         }
669
670         return ret;
671 }
672
/*
 * Compat ioctl entry point (32-bit userspace on a 64-bit kernel).
 * Not implemented: always fails with -EINVAL.
 */
static int
kni_compat_ioctl(struct inode *inode,
                unsigned int ioctl_num,
                unsigned long ioctl_param)
{
        /* 32 bits app on 64 bits OS to be supported later */
        KNI_PRINT("Not implemented.\n");

        return -EINVAL;
}
683
/* Module entry and exit points. */
module_init(kni_init);
module_exit(kni_exit);

/* lo_mode is consumed once at load time by kni_net_config_lo_mode();
 * S_IWUSR lets root rewrite the sysfs value, but the module does not
 * re-read it after init. */
module_param(lo_mode, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(lo_mode,
"KNI loopback mode (default=lo_mode_none):\n"
"    lo_mode_none        Kernel loopback disabled\n"
"    lo_mode_fifo        Enable kernel loopback with fifo\n"
"    lo_mode_fifo_skb    Enable kernel loopback with fifo and skb buffer\n"
"\n"
);

/* Parsed at load time by kni_parse_kthread_mode(). */
module_param(kthread_mode, charp, S_IRUGO);
MODULE_PARM_DESC(kthread_mode,
"Kernel thread mode (default=single):\n"
"    single    Single kernel thread mode enabled.\n"
"    multiple  Multiple kernel thread mode enabled.\n"
"\n"
);