/*-
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 *   General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *   The full GNU General Public License is included in this distribution
 *   in the file called LICENSE.GPL.
 *
 *   Contact Information:
 *   Intel Corporation
 */

#include <linux/version.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/rwsem.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include <exec-env/rte_kni_common.h>

#include "compat.h"
#include "kni_dev.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Kernel Module for managing kni devices");

#define KNI_RX_LOOP_NUM 1000

#define KNI_MAX_DEVICES 32

extern const struct pci_device_id ixgbe_pci_tbl[];
extern const struct pci_device_id igb_pci_tbl[];

/* loopback mode */
static char *lo_mode;

/* Kernel thread mode */
static char *kthread_mode;
static uint32_t multiple_kthread_on;

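/*
 * All state below is tracked per network namespace (struct kni_net), so
 * each netns gets its own device-in-use flag, single-mode kthread handle
 * and list of KNI devices.
 */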
#define KNI_DEV_IN_USE_BIT_NUM 0 /* Bit number for device in use */

static int kni_net_id;

struct kni_net {
	unsigned long device_in_use; /* device in use flag */
	struct mutex kni_kthread_lock;
	struct task_struct *kni_kthread;
	struct rw_semaphore kni_list_lock;
	struct list_head kni_list_head;
};

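/*
 * Per-netns constructor: reset the device-in-use flag and initialize the
 * locks and the device list. On kernels without simplified pernet
 * operations, the kni_net structure must be allocated and attached
 * manually via net_assign_generic().
 */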
static int __net_init
kni_init_net(struct net *net)
{
#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
	struct kni_net *knet = net_generic(net, kni_net_id);

	memset(knet, 0, sizeof(*knet));
#else
	struct kni_net *knet;
	int ret;

	knet = kzalloc(sizeof(struct kni_net), GFP_KERNEL);
	if (!knet) {
		ret = -ENOMEM;
		return ret;
	}
#endif

	/* Clear the bit of device in use */
	clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);

	mutex_init(&knet->kni_kthread_lock);

	init_rwsem(&knet->kni_list_lock);
	INIT_LIST_HEAD(&knet->kni_list_head);

#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
	return 0;
#else
	ret = net_assign_generic(net, kni_net_id, knet);
	if (ret < 0)
		kfree(knet);

	return ret;
#endif
}

static void __net_exit
kni_exit_net(struct net *net)
{
	struct kni_net *knet __maybe_unused;

	knet = net_generic(net, kni_net_id);
	mutex_destroy(&knet->kni_kthread_lock);

#ifndef HAVE_SIMPLIFIED_PERNET_OPERATIONS
	kfree(knet);
#endif
}

static struct pernet_operations kni_net_ops = {
	.init = kni_init_net,
	.exit = kni_exit_net,
#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
	.id   = &kni_net_id,
	.size = sizeof(struct kni_net),
#endif
};

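/*
 * Single kthread mode: one "kni_single" thread services every KNI device
 * in the namespace. It polls each device's RX path and pending request
 * responses KNI_RX_LOOP_NUM times per pass, holding the list lock only
 * for reading so devices can still be looked up concurrently.
 */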
static int
kni_thread_single(void *data)
{
	struct kni_net *knet = data;
	int j;
	struct kni_dev *dev;

	while (!kthread_should_stop()) {
		down_read(&knet->kni_list_lock);
		for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
			list_for_each_entry(dev, &knet->kni_list_head, list) {
#ifdef RTE_KNI_VHOST
				kni_chk_vhost_rx(dev);
#else
				kni_net_rx(dev);
#endif
				kni_net_poll_resp(dev);
			}
		}
		up_read(&knet->kni_list_lock);
#ifdef RTE_KNI_PREEMPT_DEFAULT
		/* reschedule out for a while */
		schedule_timeout_interruptible(
			usecs_to_jiffies(KNI_KTHREAD_RESCHEDULE_INTERVAL));
#endif
	}

	return 0;
}

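/*
 * Multiple kthread mode: each KNI device gets its own "kni_<name>" thread,
 * which polls only that device and so never takes the shared list lock.
 */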
static int
kni_thread_multiple(void *param)
{
	int j;
	struct kni_dev *dev = (struct kni_dev *)param;

	while (!kthread_should_stop()) {
		for (j = 0; j < KNI_RX_LOOP_NUM; j++) {
#ifdef RTE_KNI_VHOST
			kni_chk_vhost_rx(dev);
#else
			kni_net_rx(dev);
#endif
			kni_net_poll_resp(dev);
		}
#ifdef RTE_KNI_PREEMPT_DEFAULT
		schedule_timeout_interruptible(
			usecs_to_jiffies(KNI_KTHREAD_RESCHEDULE_INTERVAL));
#endif
	}

	return 0;
}

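/*
 * /dev/kni may be held open by only one process per network namespace;
 * the in-use bit acts as the exclusion flag. The opener's netns is
 * pinned with get_net() and stashed in the file so release can find it.
 */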
static int
kni_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct kni_net *knet = net_generic(net, kni_net_id);

	/* kni device can be opened by one user only per netns */
	if (test_and_set_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use))
		return -EBUSY;

	file->private_data = get_net(net);
	pr_debug("/dev/kni opened\n");

	return 0;
}

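/*
 * Tear down a single KNI device: detach the ethtool PCI backing (if any)
 * and unregister/free the corresponding net_device.
 */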
static int
kni_dev_remove(struct kni_dev *dev)
{
	if (!dev)
		return -ENODEV;

#ifdef CONFIG_RTE_KNI_KMOD_ETHTOOL
	if (dev->pci_dev) {
		if (pci_match_id(ixgbe_pci_tbl, dev->pci_dev))
			ixgbe_kni_remove(dev->pci_dev);
		else if (pci_match_id(igb_pci_tbl, dev->pci_dev))
			igb_kni_remove(dev->pci_dev);
	}
#endif

	if (dev->net_dev) {
		unregister_netdev(dev->net_dev);
		free_netdev(dev->net_dev);
	}

	return 0;
}

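/*
 * Last close of /dev/kni: stop the polling thread(s), remove every device
 * still on the namespace list, then clear the in-use bit and drop the
 * netns reference taken in kni_open().
 */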
static int
kni_release(struct inode *inode, struct file *file)
{
	struct net *net = file->private_data;
	struct kni_net *knet = net_generic(net, kni_net_id);
	struct kni_dev *dev, *n;

	/* Stop kernel thread for single mode */
	if (multiple_kthread_on == 0) {
		mutex_lock(&knet->kni_kthread_lock);
		/* Stop kernel thread */
		if (knet->kni_kthread != NULL) {
			kthread_stop(knet->kni_kthread);
			knet->kni_kthread = NULL;
		}
		mutex_unlock(&knet->kni_kthread_lock);
	}

	down_write(&knet->kni_list_lock);
	list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
		/* Stop kernel thread for multiple mode */
		if (multiple_kthread_on && dev->pthread != NULL) {
			kthread_stop(dev->pthread);
			dev->pthread = NULL;
		}

#ifdef RTE_KNI_VHOST
		kni_vhost_backend_release(dev);
#endif
		kni_dev_remove(dev);
		list_del(&dev->list);
	}
	up_write(&knet->kni_list_lock);

	/* Clear the bit of device in use */
	clear_bit(KNI_DEV_IN_USE_BIT_NUM, &knet->device_in_use);

	put_net(net);
	pr_debug("/dev/kni closed\n");

	return 0;
}

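/*
 * Reject a create request whose interface name collides with an existing
 * device; called with the list read lock held.
 */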
static int
kni_check_param(struct kni_dev *kni, struct rte_kni_device_info *dev)
{
	if (!kni || !dev)
		return -1;

	/* Check if network name has been used */
	if (!strncmp(kni->name, dev->name, RTE_KNI_NAMESIZE)) {
		pr_err("KNI name %s duplicated\n", dev->name);
		return -1;
	}

	return 0;
}

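/*
 * Start the polling thread for a new device. In multiple mode a dedicated
 * thread is spawned per device; in single mode the shared thread is
 * created on first use only. force_bind pins the thread to the core id
 * requested by user space.
 */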
static int
kni_run_thread(struct kni_net *knet, struct kni_dev *kni, uint8_t force_bind)
{
	/**
	 * Create a new kernel thread for multiple mode, set its core affinity,
	 * and finally wake it up.
	 */
	if (multiple_kthread_on) {
		kni->pthread = kthread_create(kni_thread_multiple,
			(void *)kni, "kni_%s", kni->name);
		if (IS_ERR(kni->pthread)) {
			kni_dev_remove(kni);
			return -ECANCELED;
		}

		if (force_bind)
			kthread_bind(kni->pthread, kni->core_id);
		wake_up_process(kni->pthread);
	} else {
		mutex_lock(&knet->kni_kthread_lock);

		if (knet->kni_kthread == NULL) {
			knet->kni_kthread = kthread_create(kni_thread_single,
				(void *)knet, "kni_single");
			if (IS_ERR(knet->kni_kthread)) {
				mutex_unlock(&knet->kni_kthread_lock);
				kni_dev_remove(kni);
				return -ECANCELED;
			}

			if (force_bind)
				kthread_bind(knet->kni_kthread, kni->core_id);
			wake_up_process(knet->kni_kthread);
		}

		mutex_unlock(&knet->kni_kthread_lock);
	}

	return 0;
}

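/*
 * RTE_KNI_IOCTL_CREATE handler. User space passes the physical addresses
 * of the mbuf FIFOs it allocated from physically contiguous (hugepage)
 * memory, so the kernel side can reach the same rings through a plain
 * phys_to_virt() translation, with no copies across the boundary.
 */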
static int
kni_ioctl_create(struct net *net, uint32_t ioctl_num,
		unsigned long ioctl_param)
{
	struct kni_net *knet = net_generic(net, kni_net_id);
	int ret;
	struct rte_kni_device_info dev_info;
	struct net_device *net_dev = NULL;
	struct kni_dev *kni, *dev, *n;

	pr_info("Creating kni...\n");
	/* Check the buffer size, to avoid warning */
	if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
		return -EINVAL;

	/* Copy kni info from user space */
	ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
	if (ret) {
		pr_err("copy_from_user in kni_ioctl_create\n");
		return -EIO;
	}

	/**
	 * Check if the cpu core id is valid for binding.
	 */
	if (dev_info.force_bind && !cpu_online(dev_info.core_id)) {
		pr_err("cpu %u is not online\n", dev_info.core_id);
		return -EINVAL;
	}

	/* Check if it has been created */
	down_read(&knet->kni_list_lock);
	list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
		if (kni_check_param(dev, &dev_info) < 0) {
			up_read(&knet->kni_list_lock);
			return -EINVAL;
		}
	}
	up_read(&knet->kni_list_lock);

	net_dev = alloc_netdev(sizeof(struct kni_dev), dev_info.name,
#ifdef NET_NAME_UNKNOWN
							NET_NAME_UNKNOWN,
#endif
							kni_net_init);
	if (net_dev == NULL) {
		pr_err("error allocating device \"%s\"\n", dev_info.name);
		return -EBUSY;
	}

	dev_net_set(net_dev, net);

	kni = netdev_priv(net_dev);

	kni->net_dev = net_dev;
	kni->group_id = dev_info.group_id;
	kni->core_id = dev_info.core_id;
	strncpy(kni->name, dev_info.name, RTE_KNI_NAMESIZE);

	/* Translate user space info into kernel space info */
	kni->tx_q = phys_to_virt(dev_info.tx_phys);
	kni->rx_q = phys_to_virt(dev_info.rx_phys);
	kni->alloc_q = phys_to_virt(dev_info.alloc_phys);
	kni->free_q = phys_to_virt(dev_info.free_phys);

	kni->req_q = phys_to_virt(dev_info.req_phys);
	kni->resp_q = phys_to_virt(dev_info.resp_phys);
	kni->sync_va = dev_info.sync_va;
	kni->sync_kva = phys_to_virt(dev_info.sync_phys);

#ifdef RTE_KNI_VHOST
	kni->vhost_queue = NULL;
	kni->vq_status = BE_STOP;
#endif
	kni->mbuf_size = dev_info.mbuf_size;

	pr_debug("tx_phys: 0x%016llx, tx_q addr: 0x%p\n",
		(unsigned long long) dev_info.tx_phys, kni->tx_q);
	pr_debug("rx_phys: 0x%016llx, rx_q addr: 0x%p\n",
		(unsigned long long) dev_info.rx_phys, kni->rx_q);
	pr_debug("alloc_phys: 0x%016llx, alloc_q addr: 0x%p\n",
		(unsigned long long) dev_info.alloc_phys, kni->alloc_q);
	pr_debug("free_phys: 0x%016llx, free_q addr: 0x%p\n",
		(unsigned long long) dev_info.free_phys, kni->free_q);
	pr_debug("req_phys: 0x%016llx, req_q addr: 0x%p\n",
		(unsigned long long) dev_info.req_phys, kni->req_q);
	pr_debug("resp_phys: 0x%016llx, resp_q addr: 0x%p\n",
		(unsigned long long) dev_info.resp_phys, kni->resp_q);
	pr_debug("mbuf_size: %u\n", kni->mbuf_size);

414 pr_debug("PCI: %02x:%02x.%02x %04x:%04x\n",
#ifdef CONFIG_RTE_KNI_KMOD_ETHTOOL
	struct pci_dev *found_pci = NULL;
	struct net_device *lad_dev = NULL;
	struct pci_dev *pci = NULL;

	pci = pci_get_device(dev_info.vendor_id, dev_info.device_id, NULL);

	/* Support Ethtool */
	while (pci) {
		pr_debug("pci_bus: %02x:%02x:%02x\n",
					pci->bus->number,
					PCI_SLOT(pci->devfn),
					PCI_FUNC(pci->devfn));

		if ((pci->bus->number == dev_info.bus) &&
			(PCI_SLOT(pci->devfn) == dev_info.devid) &&
			(PCI_FUNC(pci->devfn) == dev_info.function)) {
			found_pci = pci;

			if (pci_match_id(ixgbe_pci_tbl, found_pci))
				ret = ixgbe_kni_probe(found_pci, &lad_dev);
			else if (pci_match_id(igb_pci_tbl, found_pci))
				ret = igb_kni_probe(found_pci, &lad_dev);
			else
				ret = -1;

			pr_debug("PCI found: pci=0x%p, lad_dev=0x%p\n",
							pci, lad_dev);

			if (ret == 0) {
				kni->lad_dev = lad_dev;
				kni_set_ethtool_ops(kni->net_dev);
			} else {
				pr_err("Device not supported by ethtool\n");
				kni->lad_dev = NULL;
			}

			kni->pci_dev = found_pci;
			kni->device_id = dev_info.device_id;
			break;
		}
		pci = pci_get_device(dev_info.vendor_id,
				dev_info.device_id, pci);
	}
	if (pci)
		pci_dev_put(pci);
#endif

	if (kni->lad_dev)
		ether_addr_copy(net_dev->dev_addr, kni->lad_dev->dev_addr);
	else
		/*
		 * Generate a random MAC address. eth_random_addr() is the
		 * newer kernel interface for generating MAC addresses.
		 */
		random_ether_addr(net_dev->dev_addr);

	ret = register_netdev(net_dev);
	if (ret) {
		pr_err("error %i registering device \"%s\"\n",
					ret, dev_info.name);
		kni->net_dev = NULL;
		kni_dev_remove(kni);
		free_netdev(net_dev);
		return -ENODEV;
	}

#ifdef RTE_KNI_VHOST
	kni_vhost_init(kni);
#endif

	ret = kni_run_thread(knet, kni, dev_info.force_bind);
	if (ret != 0)
		return ret;

	down_write(&knet->kni_list_lock);
	list_add(&kni->list, &knet->kni_list_head);
	up_write(&knet->kni_list_lock);

	return 0;
}

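/*
 * RTE_KNI_IOCTL_RELEASE handler: look up the device by the name given by
 * user space, stop its thread if it has a private one, and remove it from
 * the namespace list.
 */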
static int
kni_ioctl_release(struct net *net, uint32_t ioctl_num,
		unsigned long ioctl_param)
{
	struct kni_net *knet = net_generic(net, kni_net_id);
	int ret = -EINVAL;
	struct kni_dev *dev, *n;
	struct rte_kni_device_info dev_info;

	if (_IOC_SIZE(ioctl_num) > sizeof(dev_info))
		return -EINVAL;

	ret = copy_from_user(&dev_info, (void *)ioctl_param, sizeof(dev_info));
	if (ret) {
		pr_err("copy_from_user in kni_ioctl_release\n");
		return -EIO;
	}

	/* Release the network device according to its name */
	if (strlen(dev_info.name) == 0)
		return ret;

	down_write(&knet->kni_list_lock);
	list_for_each_entry_safe(dev, n, &knet->kni_list_head, list) {
		if (strncmp(dev->name, dev_info.name, RTE_KNI_NAMESIZE) != 0)
			continue;

		if (multiple_kthread_on && dev->pthread != NULL) {
			kthread_stop(dev->pthread);
			dev->pthread = NULL;
		}

#ifdef RTE_KNI_VHOST
		kni_vhost_backend_release(dev);
#endif
		kni_dev_remove(dev);
		list_del(&dev->list);
		ret = 0;
		break;
	}
	up_write(&knet->kni_list_lock);
	pr_info("%s release kni named %s\n",
		(ret == 0 ? "Successfully" : "Unsuccessfully"), dev_info.name);

	return ret;
}

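/*
 * Top-level ioctl dispatcher, keyed on the ioctl command number only;
 * each handler re-checks the argument size itself.
 */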
static int
kni_ioctl(struct inode *inode, uint32_t ioctl_num, unsigned long ioctl_param)
{
	int ret = -EINVAL;
	struct net *net = current->nsproxy->net_ns;

	pr_debug("IOCTL num=0x%0x param=0x%0lx\n", ioctl_num, ioctl_param);

	/*
	 * Switch according to the ioctl called
	 */
	switch (_IOC_NR(ioctl_num)) {
	case _IOC_NR(RTE_KNI_IOCTL_TEST):
		/* For test only, not used */
		break;
	case _IOC_NR(RTE_KNI_IOCTL_CREATE):
		ret = kni_ioctl_create(net, ioctl_num, ioctl_param);
		break;
	case _IOC_NR(RTE_KNI_IOCTL_RELEASE):
		ret = kni_ioctl_release(net, ioctl_num, ioctl_param);
		break;
	default:
		pr_debug("IOCTL default\n");
		break;
	}

	return ret;
}

static int
kni_compat_ioctl(struct inode *inode, uint32_t ioctl_num,
		unsigned long ioctl_param)
{
	/* 32 bits app on 64 bits OS to be supported later */
	pr_debug("Not implemented.\n");

	return -EINVAL;
}

static const struct file_operations kni_fops = {
	.owner = THIS_MODULE,
	.open = kni_open,
	.release = kni_release,
	.unlocked_ioctl = (void *)kni_ioctl,
	.compat_ioctl = (void *)kni_compat_ioctl,
};

static struct miscdevice kni_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = KNI_DEVICE,
	.fops = &kni_fops,
};

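/*
 * Validate the kthread_mode module parameter: absent or "single" keeps
 * the default single-thread mode, "multiple" enables per-device threads,
 * anything else fails module load.
 */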
static int __init
kni_parse_kthread_mode(void)
{
	if (!kthread_mode)
		return 0;

	if (strcmp(kthread_mode, "single") == 0)
		return 0;
	else if (strcmp(kthread_mode, "multiple") == 0)
		multiple_kthread_on = 1;
	else
		return -1;

	return 0;
}

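/*
 * Module entry point: parse parameters, register the pernet operations
 * and the /dev/kni misc device, then apply the requested loopback mode.
 */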
static int __init
kni_init(void)
{
	int rc;

	if (kni_parse_kthread_mode() < 0) {
		pr_err("Invalid parameter for kthread_mode\n");
		return -EINVAL;
	}

	if (multiple_kthread_on == 0)
		pr_debug("Single kernel thread for all KNI devices\n");
	else
		pr_debug("Multiple kernel thread mode enabled\n");

#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
	rc = register_pernet_subsys(&kni_net_ops);
#else
	rc = register_pernet_gen_subsys(&kni_net_id, &kni_net_ops);
#endif
	if (rc)
		return -EPERM;

	rc = misc_register(&kni_misc);
	if (rc != 0) {
		pr_err("Misc registration failed\n");
		goto out;
	}

	/* Configure the lo mode according to the input parameter */
	kni_net_config_lo_mode(lo_mode);

	return 0;

out:
#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
	unregister_pernet_subsys(&kni_net_ops);
#else
	unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
#endif
	return rc;
}

static void __exit
kni_exit(void)
{
	misc_deregister(&kni_misc);
#ifdef HAVE_SIMPLIFIED_PERNET_OPERATIONS
	unregister_pernet_subsys(&kni_net_ops);
#else
	unregister_pernet_gen_subsys(kni_net_id, &kni_net_ops);
#endif
}

module_init(kni_init);
module_exit(kni_exit);

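/*
 * Typical load-time usage (illustrative; the module file name depends on
 * the build, rte_kni.ko in a default DPDK build):
 *
 *   # single shared polling thread, no loopback (defaults)
 *   insmod rte_kni.ko
 *
 *   # one polling thread per device, fifo loopback for testing
 *   insmod rte_kni.ko kthread_mode=multiple lo_mode=lo_mode_fifo
 */
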
module_param(lo_mode, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(lo_mode,
"KNI loopback mode (default=lo_mode_none):\n"
"    lo_mode_none        Kernel loopback disabled\n"
"    lo_mode_fifo        Enable kernel loopback with fifo\n"
"    lo_mode_fifo_skb    Enable kernel loopback with fifo and skb buffer\n"
"\n"
);

module_param(kthread_mode, charp, S_IRUGO);
MODULE_PARM_DESC(kthread_mode,
"Kernel thread mode (default=single):\n"
"    single    Single kernel thread mode enabled.\n"
"    multiple  Multiple kernel thread mode enabled.\n"
"\n"
);