kni: fix crash when removing interface
[dpdk.git] / lib / librte_eal / linuxapp / kni / kni_misc.c
1 /*-
2  * GPL LICENSE SUMMARY
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *
6  *   This program is free software; you can redistribute it and/or modify
7  *   it under the terms of version 2 of the GNU General Public License as
8  *   published by the Free Software Foundation.
9  *
10  *   This program is distributed in the hope that it will be useful, but
11  *   WITHOUT ANY WARRANTY; without even the implied warranty of
12  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  *   General Public License for more details.
14  *
15  *   You should have received a copy of the GNU General Public License
16  *   along with this program; if not, write to the Free Software
17  *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18  *   The full GNU General Public License is included in this distribution
19  *   in the file called LICENSE.GPL.
20  *
21  *   Contact Information:
22  *   Intel Corporation
23  */
24
25 #include <linux/version.h>
26 #include <linux/module.h>
27 #include <linux/miscdevice.h>
28 #include <linux/netdevice.h>
29 #include <linux/etherdevice.h>
30 #include <linux/pci.h>
31 #include <linux/kthread.h>
32 #include <linux/rwsem.h>
33 #include <linux/nsproxy.h>
34 #include <net/net_namespace.h>
35 #include <net/netns/generic.h>
36
37 #include <exec-env/rte_kni_common.h>
38
39 #include "compat.h"
40 #include "kni_dev.h"
41
42 MODULE_LICENSE("Dual BSD/GPL");
43 MODULE_AUTHOR("Intel Corporation");
44 MODULE_DESCRIPTION("Kernel Module for managing kni devices");
45
46 #define KNI_RX_LOOP_NUM 1000
47
48 #define KNI_MAX_DEVICES 32
49
50 extern void kni_net_rx(struct kni_dev *kni);
51 extern void kni_net_init(struct net_device *dev);
52 extern void kni_net_config_lo_mode(char *lo_str);
53 extern void kni_net_poll_resp(struct kni_dev *kni);
54 extern void kni_set_ethtool_ops(struct net_device *netdev);
55
56 extern int ixgbe_kni_probe(struct pci_dev *pdev, struct net_device **lad_dev);
57 extern void ixgbe_kni_remove(struct pci_dev *pdev);
58 extern struct pci_device_id ixgbe_pci_tbl[];
59 extern int igb_kni_probe(struct pci_dev *pdev, struct net_device **lad_dev);
60 extern void igb_kni_remove(struct pci_dev *pdev);
61 extern struct pci_device_id igb_pci_tbl[];
62
63 static int kni_open(struct inode *inode, struct file *file);
64 static int kni_release(struct inode *inode, struct file *file);
65 static int kni_ioctl(struct inode *inode, unsigned int ioctl_num,
66                                         unsigned long ioctl_param);
67 static int kni_compat_ioctl(struct inode *inode, unsigned int ioctl_num,
68                                                 unsigned long ioctl_param);
69 static int kni_dev_remove(struct kni_dev *dev);
70
71 static int __init kni_parse_kthread_mode(void);
72
73 /* KNI processing for single kernel thread mode */
74 static int kni_thread_single(void *unused);
75 /* KNI processing for multiple kernel thread mode */
76 static int kni_thread_multiple(void *param);
77
/* Character-device callbacks for /dev/kni.
 * NOTE(review): kni_ioctl/kni_compat_ioctl use the legacy inode-based
 * prototype, not the (struct file *, ...) one these slots expect; the
 * (void *) casts silence the mismatch but defeat type checking — the
 * first argument received at runtime is actually a struct file *.
 * Consider converting the handlers to the proper unlocked_ioctl
 * signature. */
static struct file_operations kni_fops = {
        .owner = THIS_MODULE,
        .open = kni_open,
        .release = kni_release,
        .unlocked_ioctl = (void *)kni_ioctl,
        .compat_ioctl = (void *)kni_compat_ioctl,
};
85
/* Misc device registered as /dev/<KNI_DEVICE> with a dynamically
 * assigned minor number; registered in kni_init(). */
static struct miscdevice kni_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = KNI_DEVICE,
        .fops = &kni_fops,
};
91
/* loopback mode (module parameter, parsed by kni_net_config_lo_mode()) */
static char *lo_mode = NULL;

/* Kernel thread mode (module parameter: "single" or "multiple") */
static char *kthread_mode = NULL;
/* Set to 1 by kni_parse_kthread_mode() when kthread_mode == "multiple" */
static unsigned multiple_kthread_on = 0;

#define KNI_DEV_IN_USE_BIT_NUM 0 /* Bit number for device in use */

/* Key for looking up the per-namespace state via net_generic() */
static int kni_net_id;

/* Per network-namespace KNI state */
struct kni_net {
        unsigned long device_in_use; /* device in use flag */
        struct task_struct *kni_kthread; /* RX thread (single-thread mode) */
        struct rw_semaphore kni_list_lock; /* protects kni_list_head */
        struct list_head kni_list_head; /* active kni_dev instances */
};
109
110 static int __net_init kni_init_net(struct net *net)
111 {
112 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
113         struct kni_net *knet = net_generic(net, kni_net_id);
114 #else
115         struct kni_net *knet;
116         int ret;
117
118         knet = kmalloc(sizeof(struct kni_net), GFP_KERNEL);
119         if (!knet) {
120                 ret = -ENOMEM;
121                 return ret;
122         }
123 #endif
124
125         /* Clear the bit of device in use */
126         clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);
127
128         init_rwsem(&knet->kni_list_lock);
129         INIT_LIST_HEAD(&knet->kni_list_head);
130
131 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
132         return 0;
133 #else
134         ret = net_assign_generic(net, kni_net_id, knet);
135         if (ret < 0)
136                 kfree(knet);
137
138         return ret;
139 #endif
140 }
141
/*
 * Per-network-namespace destructor.  With simplified pernet operations
 * the framework owns the kni_net storage, so there is nothing to free;
 * otherwise release the buffer allocated in kni_init_net().
 */
static void __net_exit kni_exit_net(struct net *net)
{
#ifndef HAVE_SIMPLIFIED_PERNET_OPERATIONS
        struct kni_net *knet = net_generic(net, kni_net_id);

        kfree(knet);
#endif
}
150
/* Pernet hooks: with the simplified API the framework allocates
 * sizeof(struct kni_net) per namespace and hands back the id used by
 * net_generic(); on older kernels kni_init_net() allocates manually. */
static struct pernet_operations kni_net_ops = {
        .init = kni_init_net,
        .exit = kni_exit_net,
#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
        .id   = &kni_net_id,
        .size = sizeof(struct kni_net),
#endif
};
159
160 static int __init
161 kni_init(void)
162 {
163         int rc;
164
165         KNI_PRINT("######## DPDK kni module loading ########\n");
166
167         if (kni_parse_kthread_mode() < 0) {
168                 KNI_ERR("Invalid parameter for kthread_mode\n");
169                 return -EINVAL;
170         }
171
172 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
173         rc = register_pernet_subsys(&kni_net_ops);
174 #else
175         rc = register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
176 #endif
177         if (rc)
178                 return -EPERM;
179
180         rc = misc_register(&kni_misc);
181         if (rc != 0) {
182                 KNI_ERR("Misc registration failed\n");
183                 goto out;
184         }
185
186         /* Configure the lo mode according to the input parameter */
187         kni_net_config_lo_mode(lo_mode);
188
189         KNI_PRINT("######## DPDK kni module loaded  ########\n");
190
191         return 0;
192
193 out:
194 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
195         unregister_pernet_subsys(&kni_net_ops);
196 #else
197         register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
198 #endif
199         return rc;
200 }
201
202 static void __exit
203 kni_exit(void)
204 {
205         misc_deregister(&kni_misc);
206 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
207         unregister_pernet_subsys(&kni_net_ops);
208 #else
209         register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
210 #endif
211         KNI_PRINT("####### DPDK kni module unloaded  #######\n");
212 }
213
214 static int __init
215 kni_parse_kthread_mode(void)
216 {
217         if (!kthread_mode)
218                 return 0;
219
220         if (strcmp(kthread_mode, "single") == 0)
221                 return 0;
222         else if (strcmp(kthread_mode, "multiple") == 0)
223                 multiple_kthread_on = 1;
224         else
225                 return -1;
226
227         return 0;
228 }
229
230 static int
231 kni_open(struct inode *inode, struct file *file)
232 {
233         struct net *net = current->nsproxy->net_ns;
234         struct kni_net *knet = net_generic(net, kni_net_id);
235
236         /* kni device can be opened by one user only per netns */
237         if (test_and_set_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use))
238                 return -EBUSY;
239
240         /* Create kernel thread for single mode */
241         if (multiple_kthread_on == 0) {
242                 KNI_PRINT("Single kernel thread for all KNI devices\n");
243                 /* Create kernel thread for RX */
244                 knet->kni_kthread = kthread_run(kni_thread_single, (void *)knet,
245                                                 "kni_single");
246                 if (IS_ERR(knet->kni_kthread)) {
247                         KNI_ERR("Unable to create kernel threaed\n");
248                         return PTR_ERR(knet->kni_kthread);
249                 }
250         } else
251                 KNI_PRINT("Multiple kernel thread mode enabled\n");
252
253         file->private_data = get_net(net);
254         KNI_PRINT("/dev/kni opened\n");
255
256         return 0;
257 }
258
/*
 * release() handler for /dev/kni (last close of the file).  Stops the
 * RX kernel thread(s), tears down every KNI interface created through
 * this handle, drops the net reference taken in kni_open() and clears
 * the per-netns "in use" bit so the device can be opened again.
 */
static int
kni_release(struct inode *inode, struct file *file)
{
        /* net was reference-counted with get_net() in kni_open() */
        struct net *net = file->private_data;
        struct kni_net *knet = net_generic(net, kni_net_id);
        struct kni_dev *dev, *n;

        /* Stop kernel thread for single mode */
        if (multiple_kthread_on == 0) {
                /* Stop kernel thread */
                kthread_stop(knet->kni_kthread);
                knet->kni_kthread = NULL;
        }

        /* _safe iteration: kni_dev_remove()/list_del free each entry */
        down_write(&knet->kni_list_lock);
        list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
                /* Stop kernel thread for multiple mode */
                if (multiple_kthread_on && dev->pthread != NULL) {
                        kthread_stop(dev->pthread);
                        dev->pthread = NULL;
                }

#ifdef RTE_KNI_VHOST
                kni_vhost_backend_release(dev);
#endif
                kni_dev_remove(dev);
                list_del(&dev->list);
        }
        up_write(&knet->kni_list_lock);

        /* Clear the bit of device in use */
        clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);

        put_net(net);
        KNI_PRINT("/dev/kni closed\n");

        return 0;
}
297
298 static int
299 kni_thread_single(void *data)
300 {
301         struct kni_net *knet = data;
302         int j;
303         struct kni_dev *dev;
304
305         while (!kthread_should_stop()) {
306                 down_read(&knet->kni_list_lock);
307                 for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
308                         list_for_each_entry(dev, &knet->kni_list_head, list) {
309 #ifdef RTE_KNI_VHOST
310                                 kni_chk_vhost_rx(dev);
311 #else
312                                 kni_net_rx(dev);
313 #endif
314                                 kni_net_poll_resp(dev);
315                         }
316                 }
317                 up_read(&knet->kni_list_lock);
318 #ifdef RTE_KNI_PREEMPT_DEFAULT
319                 /* reschedule out for a while */
320                 schedule_timeout_interruptible(usecs_to_jiffies( \
321                                 KNI_KTHREAD_RESCHEDULE_INTERVAL));
322 #endif
323         }
324
325         return 0;
326 }
327
328 static int
329 kni_thread_multiple(void *param)
330 {
331         int j;
332         struct kni_dev *dev = (struct kni_dev *)param;
333
334         while (!kthread_should_stop()) {
335                 for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
336 #ifdef RTE_KNI_VHOST
337                         kni_chk_vhost_rx(dev);
338 #else
339                         kni_net_rx(dev);
340 #endif
341                         kni_net_poll_resp(dev);
342                 }
343 #ifdef RTE_KNI_PREEMPT_DEFAULT
344                 schedule_timeout_interruptible(usecs_to_jiffies( \
345                                 KNI_KTHREAD_RESCHEDULE_INTERVAL));
346 #endif
347         }
348
349         return 0;
350 }
351
352 static int
353 kni_dev_remove(struct kni_dev *dev)
354 {
355         if (!dev)
356                 return -ENODEV;
357
358         if (dev->pci_dev) {
359                 if (pci_match_id(ixgbe_pci_tbl, dev->pci_dev))
360                         ixgbe_kni_remove(dev->pci_dev);
361                 else if (pci_match_id(igb_pci_tbl, dev->pci_dev))
362                         igb_kni_remove(dev->pci_dev);
363         }
364
365         if (dev->net_dev) {
366                 unregister_netdev(dev->net_dev);
367                 free_netdev(dev->net_dev);
368         }
369
370         return 0;
371 }
372
373 static int
374 kni_check_param(struct kni_dev *kni, struct rte_kni_device_info *dev)
375 {
376         if (!kni || !dev)
377                 return -1;
378
379         /* Check if network name has been used */
380         if (!strncmp(kni->name, dev->name, RTE_KNI_NAMESIZE)) {
381                 KNI_ERR("KNI name %s duplicated\n", dev->name);
382                 return -1;
383         }
384
385         return 0;
386 }
387
388 static int
389 kni_ioctl_create(struct net *net,
390                 unsigned int ioctl_num, unsigned long ioctl_param)
391 {
392         struct kni_net *knet = net_generic(net, kni_net_id);
393         int ret;
394         struct rte_kni_device_info dev_info;
395         struct pci_dev *pci = NULL;
396         struct pci_dev *found_pci = NULL;
397         struct net_device *net_dev = NULL;
398         struct net_device *lad_dev = NULL;
399         struct kni_dev *kni, *dev, *n;
400
401         printk(KERN_INFO "KNI: Creating kni...\n");
402         /* Check the buffer size, to avoid warning */
403         if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
404                 return -EINVAL;
405
406         /* Copy kni info from user space */
407         ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
408         if (ret) {
409                 KNI_ERR("copy_from_user in kni_ioctl_create");
410                 return -EIO;
411         }
412
413         /**
414          * Check if the cpu core id is valid for binding,
415          * for multiple kernel thread mode.
416          */
417         if (multiple_kthread_on && dev_info.force_bind &&
418                                 !cpu_online(dev_info.core_id)) {
419                 KNI_ERR("cpu %u is not online\n", dev_info.core_id);
420                 return -EINVAL;
421         }
422
423         /* Check if it has been created */
424         down_read(&knet->kni_list_lock);
425         list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
426                 if (kni_check_param(dev, &dev_info) < 0) {
427                         up_read(&knet->kni_list_lock);
428                         return -EINVAL;
429                 }
430         }
431         up_read(&knet->kni_list_lock);
432
433         net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
434 #ifdef NET_NAME_UNKNOWN
435                                                         NET_NAME_UNKNOWN,
436 #endif
437                                                         kni_net_init);
438         if (net_dev == NULL) {
439                 KNI_ERR("error allocating device \"%s\"\n", dev_info.name);
440                 return -EBUSY;
441         }
442
443         dev_net_set(net_dev, net);
444
445         kni = netdev_priv(net_dev);
446
447         kni->net_dev = net_dev;
448         kni->group_id = dev_info.group_id;
449         kni->core_id = dev_info.core_id;
450         strncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);
451
452         /* Translate user space info into kernel space info */
453         kni->tx_q = phys_to_virt(dev_info.tx_phys);
454         kni->rx_q = phys_to_virt(dev_info.rx_phys);
455         kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
456         kni->free_q = phys_to_virt(dev_info.free_phys);
457
458         kni->req_q = phys_to_virt(dev_info.req_phys);
459         kni->resp_q = phys_to_virt(dev_info.resp_phys);
460         kni->sync_va = dev_info.sync_va;
461         kni->sync_kva = phys_to_virt(dev_info.sync_phys);
462
463         kni->mbuf_kva = phys_to_virt(dev_info.mbuf_phys);
464         kni->mbuf_va = dev_info.mbuf_va;
465
466 #ifdef RTE_KNI_VHOST
467         kni->vhost_queue = NULL;
468         kni->vq_status = BE_STOP;
469 #endif
470         kni->mbuf_size = dev_info.mbuf_size;
471
472         KNI_PRINT("tx_phys:      0x%016llx, tx_q addr:      0x%p\n",
473                 (unsigned long long) dev_info.tx_phys, kni->tx_q);
474         KNI_PRINT("rx_phys:      0x%016llx, rx_q addr:      0x%p\n",
475                 (unsigned long long) dev_info.rx_phys, kni->rx_q);
476         KNI_PRINT("alloc_phys:   0x%016llx, alloc_q addr:   0x%p\n",
477                 (unsigned long long) dev_info.alloc_phys, kni->alloc_q);
478         KNI_PRINT("free_phys:    0x%016llx, free_q addr:    0x%p\n",
479                 (unsigned long long) dev_info.free_phys, kni->free_q);
480         KNI_PRINT("req_phys:     0x%016llx, req_q addr:     0x%p\n",
481                 (unsigned long long) dev_info.req_phys, kni->req_q);
482         KNI_PRINT("resp_phys:    0x%016llx, resp_q addr:    0x%p\n",
483                 (unsigned long long) dev_info.resp_phys, kni->resp_q);
484         KNI_PRINT("mbuf_phys:    0x%016llx, mbuf_kva:       0x%p\n",
485                 (unsigned long long) dev_info.mbuf_phys, kni->mbuf_kva);
486         KNI_PRINT("mbuf_va:      0x%p\n", dev_info.mbuf_va);
487         KNI_PRINT("mbuf_size:    %u\n", kni->mbuf_size);
488
489         KNI_DBG("PCI: %02x:%02x.%02x %04x:%04x\n",
490                                         dev_info.bus,
491                                         dev_info.devid,
492                                         dev_info.function,
493                                         dev_info.vendor_id,
494                                         dev_info.device_id);
495
496         pci = pci_get_device(dev_info.vendor_id, dev_info.device_id, NULL);
497
498         /* Support Ethtool */
499         while (pci) {
500                 KNI_PRINT("pci_bus: %02x:%02x:%02x \n",
501                                         pci->bus->number,
502                                         PCI_SLOT(pci->devfn),
503                                         PCI_FUNC(pci->devfn));
504
505                 if ((pci->bus->number == dev_info.bus) &&
506                         (PCI_SLOT(pci->devfn) == dev_info.devid) &&
507                         (PCI_FUNC(pci->devfn) == dev_info.function)) {
508                         found_pci = pci;
509
510                         if (pci_match_id(ixgbe_pci_tbl, found_pci))
511                                 ret = ixgbe_kni_probe(found_pci, &lad_dev);
512                         else if (pci_match_id(igb_pci_tbl, found_pci))
513                                 ret = igb_kni_probe(found_pci, &lad_dev);
514                         else
515                                 ret = -1;
516
517                         KNI_DBG("PCI found: pci=0x%p, lad_dev=0x%p\n",
518                                                         pci, lad_dev);
519                         if (ret == 0) {
520                                 kni->lad_dev = lad_dev;
521                                 kni_set_ethtool_ops(kni->net_dev);
522                         } else {
523                                 KNI_ERR("Device not supported by ethtool");
524                                 kni->lad_dev = NULL;
525                         }
526
527                         kni->pci_dev = found_pci;
528                         kni->device_id = dev_info.device_id;
529                         break;
530                 }
531                 pci = pci_get_device(dev_info.vendor_id,
532                                 dev_info.device_id, pci);
533         }
534         if (pci)
535                 pci_dev_put(pci);
536
537         if (kni->lad_dev)
538                 memcpy(net_dev->dev_addr, kni->lad_dev->dev_addr, ETH_ALEN);
539         else
540                 /*
541                  * Generate random mac address. eth_random_addr() is the newer
542                  * version of generating mac address in linux kernel.
543                  */
544                 random_ether_addr(net_dev->dev_addr);
545
546         ret = register_netdev(net_dev);
547         if (ret) {
548                 KNI_ERR("error %i registering device \"%s\"\n",
549                                         ret, dev_info.name);
550                 kni_dev_remove(kni);
551                 return -ENODEV;
552         }
553
554 #ifdef RTE_KNI_VHOST
555         kni_vhost_init(kni);
556 #endif
557
558         /**
559          * Create a new kernel thread for multiple mode, set its core affinity,
560          * and finally wake it up.
561          */
562         if (multiple_kthread_on) {
563                 kni->pthread = kthread_create(kni_thread_multiple,
564                                               (void *)kni,
565                                               "kni_%s", kni->name);
566                 if (IS_ERR(kni->pthread)) {
567                         kni_dev_remove(kni);
568                         return -ECANCELED;
569                 }
570                 if (dev_info.force_bind)
571                         kthread_bind(kni->pthread, kni->core_id);
572                 wake_up_process(kni->pthread);
573         }
574
575         down_write(&knet->kni_list_lock);
576         list_add(&kni->list, &knet->kni_list_head);
577         up_write(&knet->kni_list_lock);
578
579         return 0;
580 }
581
582 static int
583 kni_ioctl_release(struct net *net,
584                 unsigned int ioctl_num, unsigned long ioctl_param)
585 {
586         struct kni_net *knet = net_generic(net, kni_net_id);
587         int ret = -EINVAL;
588         struct kni_dev *dev, *n;
589         struct rte_kni_device_info dev_info;
590
591         if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
592                         return -EINVAL;
593
594         ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
595         if (ret) {
596                 KNI_ERR("copy_from_user in kni_ioctl_release");
597                 return -EIO;
598         }
599
600         /* Release the network device according to its name */
601         if (strlen(dev_info.name) == 0)
602                 return ret;
603
604         down_write(&knet->kni_list_lock);
605         list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
606                 if (strncmp(dev->name, dev_info.name, RTE_KNI_NAMESIZE) != 0)
607                         continue;
608
609                 if (multiple_kthread_on && dev->pthread != NULL) {
610                         kthread_stop(dev->pthread);
611                         dev->pthread = NULL;
612                 }
613
614 #ifdef RTE_KNI_VHOST
615                 kni_vhost_backend_release(dev);
616 #endif
617                 kni_dev_remove(dev);
618                 list_del(&dev->list);
619                 ret = 0;
620                 break;
621         }
622         up_write(&knet->kni_list_lock);
623         printk(KERN_INFO "KNI: %s release kni named %s\n",
624                 (ret == 0 ? "Successfully" : "Unsuccessfully"), dev_info.name);
625
626         return ret;
627 }
628
629 static int
630 kni_ioctl(struct inode *inode,
631         unsigned int ioctl_num,
632         unsigned long ioctl_param)
633 {
634         int ret = -EINVAL;
635         struct net *net = current->nsproxy->net_ns;
636
637         KNI_DBG("IOCTL num=0x%0x param=0x%0lx\n", ioctl_num, ioctl_param);
638
639         /*
640          * Switch according to the ioctl called
641          */
642         switch (_IOC_NR(ioctl_num)) {
643         case _IOC_NR(RTE_KNI_IOCTL_TEST):
644                 /* For test only, not used */
645                 break;
646         case _IOC_NR(RTE_KNI_IOCTL_CREATE):
647                 ret = kni_ioctl_create(net, ioctl_num, ioctl_param);
648                 break;
649         case _IOC_NR(RTE_KNI_IOCTL_RELEASE):
650                 ret = kni_ioctl_release(net, ioctl_num, ioctl_param);
651                 break;
652         default:
653                 KNI_DBG("IOCTL default\n");
654                 break;
655         }
656
657         return ret;
658 }
659
660 static int
661 kni_compat_ioctl(struct inode *inode,
662                 unsigned int ioctl_num,
663                 unsigned long ioctl_param)
664 {
665         /* 32 bits app on 64 bits OS to be supported later */
666         KNI_PRINT("Not implemented.\n");
667
668         return -EINVAL;
669 }
670
module_init(kni_init);
module_exit(kni_exit);

/* "lo_mode": optional kernel-level loopback, applied at load time by
 * kni_net_config_lo_mode(); writable by root at runtime (S_IWUSR). */
module_param(lo_mode, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(lo_mode,
"KNI loopback mode (default=lo_mode_none):\n"
"    lo_mode_none        Kernel loopback disabled\n"
"    lo_mode_fifo        Enable kernel loopback with fifo\n"
"    lo_mode_fifo_skb    Enable kernel loopback with fifo and skb buffer\n"
"\n"
);

/* "kthread_mode": read once at load by kni_parse_kthread_mode();
 * read-only afterwards. */
module_param(kthread_mode, charp, S_IRUGO);
MODULE_PARM_DESC(kthread_mode,
"Kernel thread mode (default=single):\n"
"    single    Single kernel thread mode enabled.\n"
"    multiple  Multiple kernel thread mode enabled.\n"
"\n"
);