kni: remove ethtool support
[dpdk.git] / kernel / linux / kni / kni_misc.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright(c) 2010-2014 Intel Corporation.
4  */
5
6 #include <linux/version.h>
7 #include <linux/module.h>
8 #include <linux/miscdevice.h>
9 #include <linux/netdevice.h>
10 #include <linux/etherdevice.h>
11 #include <linux/pci.h>
12 #include <linux/kthread.h>
13 #include <linux/rwsem.h>
14 #include <linux/mutex.h>
15 #include <linux/nsproxy.h>
16 #include <net/net_namespace.h>
17 #include <net/netns/generic.h>
18
19 #include <rte_kni_common.h>
20
21 #include "compat.h"
22 #include "kni_dev.h"
23
24 MODULE_LICENSE("Dual BSD/GPL");
25 MODULE_AUTHOR("Intel Corporation");
26 MODULE_DESCRIPTION("Kernel Module for managing kni devices");
27
/* Number of RX/response polling passes per scheduling cycle of a kni thread */
#define KNI_RX_LOOP_NUM 1000

#define KNI_MAX_DEVICES 32

/* loopback mode (module parameter, parsed by kni_net_config_lo_mode()) */
static char *lo_mode;

/* Kernel thread mode ("single" or "multiple", see kni_parse_kthread_mode()) */
static char *kthread_mode;
static uint32_t multiple_kthread_on; /* non-zero => one kthread per device */

/* Default carrier state for created KNI network interfaces ("on"/"off") */
static char *carrier;
uint32_t dflt_carrier; /* parsed value; non-static, read by other KNI objects */

#define KNI_DEV_IN_USE_BIT_NUM 0 /* Bit number for device in use */

/* Per-netns generic pointer id, assigned by the pernet registration */
static int kni_net_id;
/* Per-network-namespace state of the KNI module. */
struct kni_net {
        unsigned long device_in_use; /* device in use flag */
        struct mutex kni_kthread_lock; /* serializes kni_kthread start/stop */
        struct task_struct *kni_kthread; /* single-mode RX thread, or NULL */
        struct rw_semaphore kni_list_lock; /* protects kni_list_head */
        struct list_head kni_list_head; /* all KNI devices in this netns */
};
54
55 static int __net_init
56 kni_init_net(struct net *net)
57 {
58 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
59         struct kni_net *knet = net_generic(net, kni_net_id);
60
61         memset(knet, 0, sizeof(*knet));
62 #else
63         struct kni_net *knet;
64         int ret;
65
66         knet = kzalloc(sizeof(struct kni_net), GFP_KERNEL);
67         if (!knet) {
68                 ret = -ENOMEM;
69                 return ret;
70         }
71 #endif
72
73         /* Clear the bit of device in use */
74         clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);
75
76         mutex_init(&knet->kni_kthread_lock);
77
78         init_rwsem(&knet->kni_list_lock);
79         INIT_LIST_HEAD(&knet->kni_list_head);
80
81 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
82         return 0;
83 #else
84         ret = net_assign_generic(net, kni_net_id, knet);
85         if (ret < 0)
86                 kfree(knet);
87
88         return ret;
89 #endif
90 }
91
/*
 * Per-netns destructor: tear down the kni_net state created by
 * kni_init_net(). By the time this runs all devices have been released
 * and the single-mode kthread stopped (done in kni_release()).
 */
static void __net_exit
kni_exit_net(struct net *net)
{
        /* __maybe_unused: with mutex debugging off, mutex_destroy() may
         * expand to nothing, leaving knet otherwise unreferenced on the
         * HAVE_SIMPLIFIED_PERNET_OPERATIONS build. */
        struct kni_net *knet __maybe_unused;

        knet = net_generic(net, kni_net_id);
        mutex_destroy(&knet->kni_kthread_lock);

#ifndef HAVE_SIMPLIFIED_PERNET_OPERATIONS
        /* Only the legacy pernet path allocated knet ourselves. */
        kfree(knet);
#endif
}
104
/* Pernet hooks: one kni_net instance per network namespace. On kernels
 * with simplified pernet operations the core allocates .size bytes per
 * netns and hands them out via net_generic(net, kni_net_id). */
static struct pernet_operations kni_net_ops = {
        .init = kni_init_net,
        .exit = kni_exit_net,
#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
        .id   = &kni_net_id,
        .size = sizeof(struct kni_net),
#endif
};
113
114 static int
115 kni_thread_single(void *data)
116 {
117         struct kni_net *knet = data;
118         int j;
119         struct kni_dev *dev;
120
121         while (!kthread_should_stop()) {
122                 down_read(&knet->kni_list_lock);
123                 for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
124                         list_for_each_entry(dev, &knet->kni_list_head, list) {
125                                 kni_net_rx(dev);
126                                 kni_net_poll_resp(dev);
127                         }
128                 }
129                 up_read(&knet->kni_list_lock);
130 #ifdef RTE_KNI_PREEMPT_DEFAULT
131                 /* reschedule out for a while */
132                 schedule_timeout_interruptible(
133                         usecs_to_jiffies(KNI_KTHREAD_RESCHEDULE_INTERVAL));
134 #endif
135         }
136
137         return 0;
138 }
139
140 static int
141 kni_thread_multiple(void *param)
142 {
143         int j;
144         struct kni_dev *dev = param;
145
146         while (!kthread_should_stop()) {
147                 for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
148                         kni_net_rx(dev);
149                         kni_net_poll_resp(dev);
150                 }
151 #ifdef RTE_KNI_PREEMPT_DEFAULT
152                 schedule_timeout_interruptible(
153                         usecs_to_jiffies(KNI_KTHREAD_RESCHEDULE_INTERVAL));
154 #endif
155         }
156
157         return 0;
158 }
159
160 static int
161 kni_open(struct inode *inode, struct file *file)
162 {
163         struct net *net = current->nsproxy->net_ns;
164         struct kni_net *knet = net_generic(net, kni_net_id);
165
166         /* kni device can be opened by one user only per netns */
167         if (test_and_set_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use))
168                 return -EBUSY;
169
170         file->private_data = get_net(net);
171         pr_debug("/dev/kni opened\n");
172
173         return 0;
174 }
175
176 static int
177 kni_dev_remove(struct kni_dev *dev)
178 {
179         if (!dev)
180                 return -ENODEV;
181
182         if (dev->net_dev) {
183                 unregister_netdev(dev->net_dev);
184                 free_netdev(dev->net_dev);
185         }
186
187         kni_net_release_fifo_phy(dev);
188
189         return 0;
190 }
191
192 static int
193 kni_release(struct inode *inode, struct file *file)
194 {
195         struct net *net = file->private_data;
196         struct kni_net *knet = net_generic(net, kni_net_id);
197         struct kni_dev *dev, *n;
198
199         /* Stop kernel thread for single mode */
200         if (multiple_kthread_on == 0) {
201                 mutex_lock(&knet->kni_kthread_lock);
202                 /* Stop kernel thread */
203                 if (knet->kni_kthread != NULL) {
204                         kthread_stop(knet->kni_kthread);
205                         knet->kni_kthread = NULL;
206                 }
207                 mutex_unlock(&knet->kni_kthread_lock);
208         }
209
210         down_write(&knet->kni_list_lock);
211         list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
212                 /* Stop kernel thread for multiple mode */
213                 if (multiple_kthread_on && dev->pthread != NULL) {
214                         kthread_stop(dev->pthread);
215                         dev->pthread = NULL;
216                 }
217
218                 kni_dev_remove(dev);
219                 list_del(&dev->list);
220         }
221         up_write(&knet->kni_list_lock);
222
223         /* Clear the bit of device in use */
224         clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);
225
226         put_net(net);
227         pr_debug("/dev/kni closed\n");
228
229         return 0;
230 }
231
232 static int
233 kni_check_param(struct kni_dev *kni, struct rte_kni_device_info *dev)
234 {
235         if (!kni || !dev)
236                 return -1;
237
238         /* Check if network name has been used */
239         if (!strncmp(kni->name, dev->name, RTE_KNI_NAMESIZE)) {
240                 pr_err("KNI name %s duplicated\n", dev->name);
241                 return -1;
242         }
243
244         return 0;
245 }
246
247 static int
248 kni_run_thread(struct kni_net *knet, struct kni_dev *kni, uint8_t force_bind)
249 {
250         /**
251          * Create a new kernel thread for multiple mode, set its core affinity,
252          * and finally wake it up.
253          */
254         if (multiple_kthread_on) {
255                 kni->pthread = kthread_create(kni_thread_multiple,
256                         (void *)kni, "kni_%s", kni->name);
257                 if (IS_ERR(kni->pthread)) {
258                         kni_dev_remove(kni);
259                         return -ECANCELED;
260                 }
261
262                 if (force_bind)
263                         kthread_bind(kni->pthread, kni->core_id);
264                 wake_up_process(kni->pthread);
265         } else {
266                 mutex_lock(&knet->kni_kthread_lock);
267
268                 if (knet->kni_kthread == NULL) {
269                         knet->kni_kthread = kthread_create(kni_thread_single,
270                                 (void *)knet, "kni_single");
271                         if (IS_ERR(knet->kni_kthread)) {
272                                 mutex_unlock(&knet->kni_kthread_lock);
273                                 kni_dev_remove(kni);
274                                 return -ECANCELED;
275                         }
276
277                         if (force_bind)
278                                 kthread_bind(knet->kni_kthread, kni->core_id);
279                         wake_up_process(knet->kni_kthread);
280                 }
281
282                 mutex_unlock(&knet->kni_kthread_lock);
283         }
284
285         return 0;
286 }
287
/*
 * RTE_KNI_IOCTL_CREATE handler: build a new KNI net_device from the
 * rte_kni_device_info structure passed from user space, translate the
 * fifo/sync physical addresses into kernel virtual addresses, register
 * the netdev, start its RX thread and add it to the per-netns list.
 *
 * Returns 0 on success or a negative errno.
 */
static int
kni_ioctl_create(struct net *net, uint32_t ioctl_num,
                unsigned long ioctl_param)
{
        struct kni_net *knet = net_generic(net, kni_net_id);
        int ret;
        struct rte_kni_device_info dev_info;
        struct net_device *net_dev = NULL;
        struct kni_dev *kni, *dev, *n;

        pr_info("Creating kni...\n");
        /* Check the buffer size, to avoid warning */
        if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
                return -EINVAL;

        /* Copy kni info from user space */
        ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
        if (ret) {
                pr_err("copy_from_user in kni_ioctl_create");
                return -EIO;
        }

        /* Check if name is zero-ended */
        if (strnlen(dev_info.name, sizeof(dev_info.name)) == sizeof(dev_info.name)) {
                pr_err("kni.name not zero-terminated");
                return -EINVAL;
        }

        /**
         * Check if the cpu core id is valid for binding.
         */
        if (dev_info.force_bind && !cpu_online(dev_info.core_id)) {
                pr_err("cpu %u is not online\n", dev_info.core_id);
                return -EINVAL;
        }

        /* Check if it has been created (name must be unique per netns) */
        down_read(&knet->kni_list_lock);
        list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
                if (kni_check_param(dev, &dev_info) < 0) {
                        up_read(&knet->kni_list_lock);
                        return -EINVAL;
                }
        }
        up_read(&knet->kni_list_lock);

        /* kni_dev lives in the netdev's private area; kni_net_init sets
         * up the netdev ops. NET_NAME_USER marks the name as user-chosen
         * on kernels that support naming origins. */
        net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
#ifdef NET_NAME_USER
                                                        NET_NAME_USER,
#endif
                                                        kni_net_init);
        if (net_dev == NULL) {
                pr_err("error allocating device \"%s\"\n", dev_info.name);
                return -EBUSY;
        }

        dev_net_set(net_dev, net);

        kni = netdev_priv(net_dev);

        kni->net_dev = net_dev;
        kni->group_id = dev_info.group_id;
        kni->core_id = dev_info.core_id;
        strncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);

        /* Translate user space info into kernel space info.
         * NOTE(review): phys_to_virt() assumes the fifos live in the
         * kernel linear mapping — presumably guaranteed by the DPDK
         * user-space allocator; verify against librte_kni. */
        kni->tx_q = phys_to_virt(dev_info.tx_phys);
        kni->rx_q = phys_to_virt(dev_info.rx_phys);
        kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
        kni->free_q = phys_to_virt(dev_info.free_phys);

        kni->req_q = phys_to_virt(dev_info.req_phys);
        kni->resp_q = phys_to_virt(dev_info.resp_phys);
        kni->sync_va = dev_info.sync_va;
        kni->sync_kva = phys_to_virt(dev_info.sync_phys);

        kni->mbuf_size = dev_info.mbuf_size;

        pr_debug("tx_phys:      0x%016llx, tx_q addr:      0x%p\n",
                (unsigned long long) dev_info.tx_phys, kni->tx_q);
        pr_debug("rx_phys:      0x%016llx, rx_q addr:      0x%p\n",
                (unsigned long long) dev_info.rx_phys, kni->rx_q);
        pr_debug("alloc_phys:   0x%016llx, alloc_q addr:   0x%p\n",
                (unsigned long long) dev_info.alloc_phys, kni->alloc_q);
        pr_debug("free_phys:    0x%016llx, free_q addr:    0x%p\n",
                (unsigned long long) dev_info.free_phys, kni->free_q);
        pr_debug("req_phys:     0x%016llx, req_q addr:     0x%p\n",
                (unsigned long long) dev_info.req_phys, kni->req_q);
        pr_debug("resp_phys:    0x%016llx, resp_q addr:    0x%p\n",
                (unsigned long long) dev_info.resp_phys, kni->resp_q);
        pr_debug("mbuf_size:    %u\n", kni->mbuf_size);

        pr_debug("PCI: %02x:%02x.%02x %04x:%04x\n",
                                        dev_info.bus,
                                        dev_info.devid,
                                        dev_info.function,
                                        dev_info.vendor_id,
                                        dev_info.device_id);
        /* if user has provided a valid mac address */
        if (is_valid_ether_addr(dev_info.mac_addr))
                memcpy(net_dev->dev_addr, dev_info.mac_addr, ETH_ALEN);
        else
                /*
                 * Generate random mac address. eth_random_addr() is the
                 * newer version of generating mac address in kernel.
                 */
                random_ether_addr(net_dev->dev_addr);

        if (dev_info.mtu)
                net_dev->mtu = dev_info.mtu;
#ifdef HAVE_MAX_MTU_PARAM
        /* Keep the kernel from rejecting the requested MTU as too big. */
        net_dev->max_mtu = net_dev->mtu;
#endif

        ret = register_netdev(net_dev);
        if (ret) {
                pr_err("error %i registering device \"%s\"\n",
                                        ret, dev_info.name);
                /* NULL net_dev first so kni_dev_remove() only releases
                 * the fifos; we free the netdev ourselves below. */
                kni->net_dev = NULL;
                kni_dev_remove(kni);
                free_netdev(net_dev);
                return -ENODEV;
        }

        netif_carrier_off(net_dev);

        /* On failure, kni_run_thread() has already torn the device down. */
        ret = kni_run_thread(knet, kni, dev_info.force_bind);
        if (ret != 0)
                return ret;

        down_write(&knet->kni_list_lock);
        list_add(&kni->list, &knet->kni_list_head);
        up_write(&knet->kni_list_lock);

        return 0;
}
424
425 static int
426 kni_ioctl_release(struct net *net, uint32_t ioctl_num,
427                 unsigned long ioctl_param)
428 {
429         struct kni_net *knet = net_generic(net, kni_net_id);
430         int ret = -EINVAL;
431         struct kni_dev *dev, *n;
432         struct rte_kni_device_info dev_info;
433
434         if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
435                 return -EINVAL;
436
437         ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
438         if (ret) {
439                 pr_err("copy_from_user in kni_ioctl_release");
440                 return -EIO;
441         }
442
443         /* Release the network device according to its name */
444         if (strlen(dev_info.name) == 0)
445                 return ret;
446
447         down_write(&knet->kni_list_lock);
448         list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
449                 if (strncmp(dev->name, dev_info.name, RTE_KNI_NAMESIZE) != 0)
450                         continue;
451
452                 if (multiple_kthread_on && dev->pthread != NULL) {
453                         kthread_stop(dev->pthread);
454                         dev->pthread = NULL;
455                 }
456
457                 kni_dev_remove(dev);
458                 list_del(&dev->list);
459                 ret = 0;
460                 break;
461         }
462         up_write(&knet->kni_list_lock);
463         pr_info("%s release kni named %s\n",
464                 (ret == 0 ? "Successfully" : "Unsuccessfully"), dev_info.name);
465
466         return ret;
467 }
468
469 static int
470 kni_ioctl(struct inode *inode, uint32_t ioctl_num, unsigned long ioctl_param)
471 {
472         int ret = -EINVAL;
473         struct net *net = current->nsproxy->net_ns;
474
475         pr_debug("IOCTL num=0x%0x param=0x%0lx\n", ioctl_num, ioctl_param);
476
477         /*
478          * Switch according to the ioctl called
479          */
480         switch (_IOC_NR(ioctl_num)) {
481         case _IOC_NR(RTE_KNI_IOCTL_TEST):
482                 /* For test only, not used */
483                 break;
484         case _IOC_NR(RTE_KNI_IOCTL_CREATE):
485                 ret = kni_ioctl_create(net, ioctl_num, ioctl_param);
486                 break;
487         case _IOC_NR(RTE_KNI_IOCTL_RELEASE):
488                 ret = kni_ioctl_release(net, ioctl_num, ioctl_param);
489                 break;
490         default:
491                 pr_debug("IOCTL default\n");
492                 break;
493         }
494
495         return ret;
496 }
497
498 static int
499 kni_compat_ioctl(struct inode *inode, uint32_t ioctl_num,
500                 unsigned long ioctl_param)
501 {
502         /* 32 bits app on 64 bits OS to be supported later */
503         pr_debug("Not implemented.\n");
504
505         return -EINVAL;
506 }
507
/* File operations for the /dev/kni misc device. */
static const struct file_operations kni_fops = {
        .owner = THIS_MODULE,
        .open = kni_open,
        .release = kni_release,
        /*
         * NOTE(review): kni_ioctl()/kni_compat_ioctl() take struct inode *
         * where the fops expect struct file *, so these casts invoke the
         * handlers through an incompatible function-pointer type (the
         * inode argument is simply never dereferenced). This is
         * technically undefined behaviour in C — verify on new targets.
         */
        .unlocked_ioctl = (void *)kni_ioctl,
        .compat_ioctl = (void *)kni_compat_ioctl,
};
515
/* Misc character device /dev/kni with a dynamically assigned minor. */
static struct miscdevice kni_misc = {
        .minor = MISC_DYNAMIC_MINOR,
        .name = KNI_DEVICE,
        .fops = &kni_fops,
};
521
522 static int __init
523 kni_parse_kthread_mode(void)
524 {
525         if (!kthread_mode)
526                 return 0;
527
528         if (strcmp(kthread_mode, "single") == 0)
529                 return 0;
530         else if (strcmp(kthread_mode, "multiple") == 0)
531                 multiple_kthread_on = 1;
532         else
533                 return -1;
534
535         return 0;
536 }
537
538 static int __init
539 kni_parse_carrier_state(void)
540 {
541         if (!carrier) {
542                 dflt_carrier = 0;
543                 return 0;
544         }
545
546         if (strcmp(carrier, "off") == 0)
547                 dflt_carrier = 0;
548         else if (strcmp(carrier, "on") == 0)
549                 dflt_carrier = 1;
550         else
551                 return -1;
552
553         return 0;
554 }
555
556 static int __init
557 kni_init(void)
558 {
559         int rc;
560
561         if (kni_parse_kthread_mode() < 0) {
562                 pr_err("Invalid parameter for kthread_mode\n");
563                 return -EINVAL;
564         }
565
566         if (multiple_kthread_on == 0)
567                 pr_debug("Single kernel thread for all KNI devices\n");
568         else
569                 pr_debug("Multiple kernel thread mode enabled\n");
570
571         if (kni_parse_carrier_state() < 0) {
572                 pr_err("Invalid parameter for carrier\n");
573                 return -EINVAL;
574         }
575
576         if (dflt_carrier == 0)
577                 pr_debug("Default carrier state set to off.\n");
578         else
579                 pr_debug("Default carrier state set to on.\n");
580
581 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
582         rc = register_pernet_subsys(&kni_net_ops);
583 #else
584         rc = register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
585 #endif
586         if (rc)
587                 return -EPERM;
588
589         rc = misc_register(&kni_misc);
590         if (rc != 0) {
591                 pr_err("Misc registration failed\n");
592                 goto out;
593         }
594
595         /* Configure the lo mode according to the input parameter */
596         kni_net_config_lo_mode(lo_mode);
597
598         return 0;
599
600 out:
601 #ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
602         unregister_pernet_subsys(&kni_net_ops);
603 #else
604         unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
605 #endif
606         return rc;
607 }
608
/*
 * Module exit point: remove /dev/kni first so no new opens can race the
 * pernet teardown, then unregister the per-netns operations (which runs
 * kni_exit_net() for every live namespace).
 */
static void __exit
kni_exit(void)
{
        misc_deregister(&kni_misc);
#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
        unregister_pernet_subsys(&kni_net_ops);
#else
        unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
#endif
}
619
module_init(kni_init);
module_exit(kni_exit);

/* Loopback test mode, consumed by kni_net_config_lo_mode() at init. */
module_param(lo_mode, charp, 0644);
MODULE_PARM_DESC(lo_mode,
"KNI loopback mode (default=lo_mode_none):\n"
"\t\tlo_mode_none        Kernel loopback disabled\n"
"\t\tlo_mode_fifo        Enable kernel loopback with fifo\n"
"\t\tlo_mode_fifo_skb    Enable kernel loopback with fifo and skb buffer\n"
"\t\t"
);

/* RX threading model, parsed by kni_parse_kthread_mode() at init. */
module_param(kthread_mode, charp, 0644);
MODULE_PARM_DESC(kthread_mode,
"Kernel thread mode (default=single):\n"
"\t\tsingle    Single kernel thread mode enabled.\n"
"\t\tmultiple  Multiple kernel thread mode enabled.\n"
"\t\t"
);

/* Initial carrier state, parsed by kni_parse_carrier_state() at init. */
module_param(carrier, charp, 0644);
MODULE_PARM_DESC(carrier,
"Default carrier state for KNI interface (default=off):\n"
"\t\toff   Interfaces will be created with carrier state set to off.\n"
"\t\ton    Interfaces will be created with carrier state set to on.\n"
"\t\t"
);