/*-
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 * Intel Corporation
 */
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/uio_driver.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/version.h>

#ifdef CONFIG_XEN_DOM0
#include <xen/xen.h>
#endif

/**
 * MSI-X related macros, copied from linux/pci_regs.h in kernel 2.6.39;
 * none of them are present in kernel 2.6.35.
 */
#ifndef PCI_MSIX_ENTRY_SIZE
#define PCI_MSIX_ENTRY_SIZE          16
#define PCI_MSIX_ENTRY_LOWER_ADDR    0
#define PCI_MSIX_ENTRY_UPPER_ADDR    4
#define PCI_MSIX_ENTRY_DATA          8
#define PCI_MSIX_ENTRY_VECTOR_CTRL   12
#define PCI_MSIX_ENTRY_CTRL_MASKBIT  1
#endif

#define IGBUIO_NUM_MSI_VECTORS 1
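/*
 * Note: a single MSI-X vector suffices here because the uio interrupt is
 * used only for link status change (LSC) notifications; packet I/O is
 * handled by polling from user space.
 */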

/* interrupt mode */
enum igbuio_intr_mode {
	IGBUIO_LEGACY_INTR_MODE = 0,
	IGBUIO_MSI_INTR_MODE,
	IGBUIO_MSIX_INTR_MODE,
	IGBUIO_INTR_MODE_MAX
};

/**
 * A structure describing the private information for a uio device.
 */
struct rte_uio_pci_dev {
	struct uio_info info;
	struct pci_dev *pdev;
	spinlock_t lock; /* serializes PCI config space and MSI-X accesses between tasks and the ISR */
	enum igbuio_intr_mode mode;
	struct msix_entry msix_entries[IGBUIO_NUM_MSI_VECTORS]; /* MSI-X vectors to be allocated later */
};
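
/*
 * Module-wide state: intr_mode holds the raw module parameter string and
 * igbuio_intr_mode_preferred the parsed preference (MSI-X by default).
 */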
static char *intr_mode = NULL;
static enum igbuio_intr_mode igbuio_intr_mode_preferred = IGBUIO_MSIX_INTR_MODE;

/* PCI device id table */
static struct pci_device_id igbuio_pci_ids[] = {
#define RTE_PCI_DEV_ID_DECL_EM(vend, dev) {PCI_DEVICE(vend, dev)},
#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {PCI_DEVICE(vend, dev)},
#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {PCI_DEVICE(vend, dev)},
#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {PCI_DEVICE(vend, dev)},
#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {PCI_DEVICE(vend, dev)},
#define RTE_PCI_DEV_ID_DECL_VIRTIO(vend, dev) {PCI_DEVICE(vend, dev)},
#include <rte_pci_dev_ids.h>
{ 0, },
};
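
/*
 * Each RTE_PCI_DEV_ID_DECL_*() macro above turns every (vendor, device)
 * pair listed in <rte_pci_dev_ids.h> into a table entry; for example, a
 * hypothetical RTE_PCI_DEV_ID_DECL_IGB(0x8086, 0x10C9) would expand to
 * {PCI_DEVICE(0x8086, 0x10C9)}. The { 0, } entry terminates the table.
 */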

MODULE_DEVICE_TABLE(pci, igbuio_pci_ids);

static inline struct rte_uio_pci_dev *
igbuio_get_uio_pci_dev(struct uio_info *info)
{
	return container_of(info, struct rte_uio_pci_dev, info);
}

/* sriov sysfs */
int local_pci_num_vf(struct pci_dev *dev)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
	/* pci_num_vf() does not exist yet: peek at the SR-IOV state through
	 * a local mirror of the front of struct pci_sriov. */
	struct iov {
		int pos;
		int nres;
		u32 cap;
		u16 ctrl;
		u16 total;
		u16 initial;
		u16 nr_virtfn;
	} *iov = (struct iov *)dev->sriov;

	if (!dev->is_physfn)
		return 0;

	return iov->nr_virtfn;
#else
	return pci_num_vf(dev);
#endif
}

static ssize_t
show_max_vfs(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	return snprintf(buf, 10, "%u\n", local_pci_num_vf(
			container_of(dev, struct pci_dev, dev)));
}

static ssize_t
store_max_vfs(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	int err = 0;
	unsigned long max_vfs;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);

	if (0 != strict_strtoul(buf, 0, &max_vfs))
		return -EINVAL;

	if (0 == max_vfs)
		pci_disable_sriov(pdev);
	else if (0 == local_pci_num_vf(pdev))
		err = pci_enable_sriov(pdev, max_vfs);
	else /* do not allow changing max_vfs while VFs are already enabled */
		err = -EINVAL;

	return err ? err : count;
}

static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);

static struct attribute *dev_attrs[] = {
	&dev_attr_max_vfs.attr,
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
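
/*
 * Once the probe routine registers this group, max_vfs appears under the
 * device's sysfs node. For a hypothetical device at 0000:02:00.0:
 *   echo 2 > /sys/bus/pci/devices/0000:02:00.0/max_vfs   # enable 2 VFs
 *   echo 0 > /sys/bus/pci/devices/0000:02:00.0/max_vfs   # disable SR-IOV
 */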

static inline int
pci_lock(struct pci_dev *pdev)
{
	/* Some function names changed between 3.2.0 and 3.3.0... */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
	pci_block_user_cfg_access(pdev);
	return 1;
#else
	return pci_cfg_access_trylock(pdev);
#endif
}

static inline void
pci_unlock(struct pci_dev *pdev)
{
	/* Some function names changed between 3.2.0 and 3.3.0... */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
	pci_unblock_user_cfg_access(pdev);
#else
	pci_cfg_access_unlock(pdev);
#endif
}

/**
 * Masks or unmasks the generation of MSI-X messages for one vector.
 */
static int
igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state)
{
	uint32_t mask_bits = desc->masked;
	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;

	if (state != 0)
		mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	else
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
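
	/*
	 * Write back only on change; the readl() below forces the posted
	 * write out to the device before desc->masked is updated.
	 */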
	if (mask_bits != desc->masked) {
		writel(mask_bits, desc->mask_base + offset);
		readl(desc->mask_base);
		desc->masked = mask_bits;
	}

	return 0;
}

/**
 * Sets/clears the masks for generating LSC interrupts.
 *
 * @param udev
 *   The pointer to the private uio device structure.
 * @param state
 *   The on/off flag of masking LSC.
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
igbuio_set_interrupt_mask(struct rte_uio_pci_dev *udev, int32_t state)
{
	struct pci_dev *pdev = udev->pdev;

	if (udev->mode == IGBUIO_MSIX_INTR_MODE) {
		struct msi_desc *desc;

		list_for_each_entry(desc, &pdev->msi_list, list) {
			igbuio_msix_mask_irq(desc, state);
		}
	}
	else if (udev->mode == IGBUIO_LEGACY_INTR_MODE) {
		uint32_t status;
		uint16_t old, new;

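		/*
		 * PCI_COMMAND and PCI_STATUS share one config dword; the low
		 * 16 bits are the command register, whose INTx-disable bit is
		 * toggled here to mask/unmask legacy interrupts.
		 */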
		pci_read_config_dword(pdev, PCI_COMMAND, &status);
		old = status;
		if (state != 0)
			new = old & (~PCI_COMMAND_INTX_DISABLE);
		else
			new = old | PCI_COMMAND_INTX_DISABLE;

		if (old != new)
			pci_write_config_word(pdev, PCI_COMMAND, new);
	}

	return 0;
}

/**
 * The irqcontrol callback to be registered to uio_info.
 * It can be used to enable/disable interrupts from user space processes.
 *
 * @param info
 *   pointer to uio_info.
 * @param irq_state
 *   state value. 1 to enable interrupt, 0 to disable interrupt.
 *
 * @return
 *   - On success, 0.
 *   - On failure, a negative value.
 */
static int
igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
{
	unsigned long flags;
	struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
	struct pci_dev *pdev = udev->pdev;

	spin_lock_irqsave(&udev->lock, flags);
	if (!pci_lock(pdev)) {
		spin_unlock_irqrestore(&udev->lock, flags);
		return -1;
	}

	igbuio_set_interrupt_mask(udev, irq_state);

	pci_unlock(pdev);
	spin_unlock_irqrestore(&udev->lock, flags);

	return 0;
}

/**
 * Interrupt handler that checks whether the interrupt belongs to this
 * device. If it does, the interrupt is disabled here and re-enabled later
 * from user space.
 */
static irqreturn_t
igbuio_pci_irqhandler(int irq, struct uio_info *info)
{
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
	struct pci_dev *pdev = udev->pdev;
	uint32_t cmd_status_dword;
	uint16_t status;

	spin_lock_irqsave(&udev->lock, flags);
	/* block userspace PCI config reads/writes */
	if (!pci_lock(pdev))
		goto spin_unlock;

	/* for legacy mode, the interrupt may be shared */
	if (udev->mode == IGBUIO_LEGACY_INTR_MODE) {
		pci_read_config_dword(pdev, PCI_COMMAND, &cmd_status_dword);
		status = cmd_status_dword >> 16;
		/* if the interrupt is not ours, bail out */
		if (!(status & PCI_STATUS_INTERRUPT))
			goto done;
	}

	igbuio_set_interrupt_mask(udev, 0);
	ret = IRQ_HANDLED;
done:
	/* unblock userspace PCI config reads/writes */
	pci_unlock(pdev);
spin_unlock:
	spin_unlock_irqrestore(&udev->lock, flags);
	printk(KERN_INFO "irq 0x%x %s\n", irq,
	       (ret == IRQ_HANDLED) ? "handled" : "not handled");

	return ret;
}

#ifdef CONFIG_XEN_DOM0
static int
igbuio_dom0_mmap_phys(struct uio_info *info, struct vm_area_struct *vma)
{
	int idx;

	idx = (int)vma->vm_pgoff;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_page_prot.pgprot |= _PAGE_IOMAP;

	return remap_pfn_range(vma,
			vma->vm_start,
			info->mem[idx].addr >> PAGE_SHIFT,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}

/**
 * The uio device mmap method used for the Xen Dom0 environment.
 */
static int
igbuio_dom0_pci_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	int idx;

	if (vma->vm_pgoff >= MAX_UIO_MAPS)
		return -EINVAL;
	if (info->mem[vma->vm_pgoff].size == 0)
		return -EINVAL;

	idx = (int)vma->vm_pgoff;
	switch (info->mem[idx].memtype) {
	case UIO_MEM_PHYS:
		return igbuio_dom0_mmap_phys(info, vma);
	case UIO_MEM_LOGICAL:
	case UIO_MEM_VIRTUAL:
	default:
		return -EINVAL;
	}
}
#endif

/* Remap PCI resources described by BAR #pci_bar in uio resource n. */
static int
igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
		       int n, int pci_bar, const char *name)
{
	unsigned long addr, len;
	void *internal_addr;

	if (sizeof(info->mem) / sizeof(info->mem[0]) <= n)
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -1;
	internal_addr = ioremap(addr, len);
	if (internal_addr == NULL)
		return -1;
	info->mem[n].name = name;
	info->mem[n].addr = addr;
	info->mem[n].internal_addr = internal_addr;
	info->mem[n].size = len;
	info->mem[n].memtype = UIO_MEM_PHYS;
	return 0;
}

/* Get PCI port I/O resources described by BAR #pci_bar in uio resource n. */
static int
igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
			int n, int pci_bar, const char *name)
{
	unsigned long addr, len;

	if (sizeof(info->port) / sizeof(info->port[0]) <= n)
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -1;

	info->port[n].name = name;
	info->port[n].start = addr;
	info->port[n].size = len;
	info->port[n].porttype = UIO_PORT_X86;

	return 0;
}

/* Unmap previously ioremap'd resources */
static void
igbuio_pci_release_iomem(struct uio_info *info)
{
	int i;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		if (info->mem[i].internal_addr)
			iounmap(info->mem[i].internal_addr);
	}
}

/* Set up the uio memory and port maps for all usable BARs */
static int
igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
{
	int i, iom, iop, ret;
	unsigned long flags;
	static const char *bar_names[PCI_STD_RESOURCE_END + 1] = {
		"BAR0",
		"BAR1",
		"BAR2",
		"BAR3",
		"BAR4",
		"BAR5",
	};

	iom = 0;
	iop = 0;

	for (i = 0; i != sizeof(bar_names) / sizeof(bar_names[0]); i++) {
		if (pci_resource_len(dev, i) != 0 &&
				pci_resource_start(dev, i) != 0) {
			flags = pci_resource_flags(dev, i);
			if (flags & IORESOURCE_MEM) {
				if ((ret = igbuio_pci_setup_iomem(dev, info,
						iom, i, bar_names[i])) != 0)
					return ret;
				iom++;
			} else if (flags & IORESOURCE_IO) {
				if ((ret = igbuio_pci_setup_ioport(dev, info,
						iop, i, bar_names[i])) != 0)
					return ret;
				iop++;
			}
		}
	}

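	/*
	 * At least one memory BAR must have been mapped for the device to be
	 * usable; port I/O BARs alone are not sufficient.
	 */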
	return (iom != 0) ? ret : -ENOENT;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	if (pci_enable_device(dev)) {
		printk(KERN_ERR "Cannot enable PCI device\n");
		goto fail_free;
	}

	/*
	 * reserve device's PCI memory regions for use by this
	 * module
	 */
	if (pci_request_regions(dev, "igb_uio")) {
		printk(KERN_ERR "Cannot request regions\n");
		goto fail_disable;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	if (igbuio_setup_bars(dev, &udev->info))
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	if (pci_set_dma_mask(dev, DMA_BIT_MASK(64))) {
		printk(KERN_ERR "Cannot set DMA mask\n");
		goto fail_release_iomem;
	} else if (pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64))) {
		printk(KERN_ERR "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "Intel IGB UIO";
	udev->info.version = "0.1";
	udev->info.handler = igbuio_pci_irqhandler;
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
#ifdef CONFIG_XEN_DOM0
	/* check if the driver runs on Xen Dom0 */
	if (xen_initial_domain())
		udev->info.mmap = igbuio_dom0_pci_mmap;
#endif
	udev->info.priv = udev;
	udev->pdev = dev;
	udev->mode = 0; /* default: legacy interrupt mode */
	spin_lock_init(&udev->lock);

	/* check whether MSI-X should be tried first */
	if (igbuio_intr_mode_preferred == IGBUIO_MSIX_INTR_MODE) {
		int vector;

		for (vector = 0; vector < IGBUIO_NUM_MSI_VECTORS; vector++)
			udev->msix_entries[vector].entry = vector;

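		/*
		 * With the old-style pci_enable_msix() API, 0 means success,
		 * a positive value means fewer vectors are available than
		 * requested, and a negative value is an errno; any nonzero
		 * result makes the driver fall back to legacy INTx mode.
		 */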
		if (pci_enable_msix(udev->pdev, udev->msix_entries,
				IGBUIO_NUM_MSI_VECTORS) == 0) {
			udev->mode = IGBUIO_MSIX_INTR_MODE;
		} else {
			pci_disable_msix(udev->pdev);
			printk(KERN_INFO "failed to enable PCI MSI-X, or not enough MSI-X entries\n");
		}
	}

	switch (udev->mode) {
	case IGBUIO_MSIX_INTR_MODE:
		udev->info.irq_flags = 0;
		udev->info.irq = udev->msix_entries[0].vector;
		break;
	case IGBUIO_MSI_INTR_MODE:
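		/* MSI mode is declared in the enum but never selected here */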
		break;
	case IGBUIO_LEGACY_INTR_MODE:
		udev->info.irq_flags = IRQF_SHARED;
		udev->info.irq = dev->irq;
		break;
	default:
		break;
	}

	pci_set_drvdata(dev, udev);
	igbuio_pci_irqcontrol(&udev->info, 0);

	if (sysfs_create_group(&dev->dev.kobj, &dev_attr_grp))
		goto fail_release_iomem;

	/* register uio driver */
	if (uio_register_device(&dev->dev, &udev->info))
		goto fail_release_iomem;

	printk(KERN_INFO "uio device registered with irq %lx\n", udev->info.irq);

	return 0;

fail_release_iomem:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	igbuio_pci_release_iomem(&udev->info);
	if (udev->mode == IGBUIO_MSIX_INTR_MODE)
		pci_disable_msix(udev->pdev);
	pci_release_regions(dev);
fail_disable:
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return -ENODEV;
}

static void
igbuio_pci_remove(struct pci_dev *dev)
{
	/* drvdata holds the rte_uio_pci_dev; uio_info is its first member */
	struct uio_info *info = pci_get_drvdata(dev);

	if (info->priv == NULL) {
		printk(KERN_DEBUG "Not an igbuio device\n");
		return;
	}

	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	uio_unregister_device(info);
	igbuio_pci_release_iomem(info);
	if (((struct rte_uio_pci_dev *)info->priv)->mode ==
			IGBUIO_MSIX_INTR_MODE)
		pci_disable_msix(dev);
	pci_release_regions(dev);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(info);
}

static int
igbuio_config_intr_mode(char *intr_str)
{
	if (!intr_str) {
		printk(KERN_INFO "Use MSIX interrupt by default\n");
		return 0;
	}

	if (!strcmp(intr_str, "msix")) {
		igbuio_intr_mode_preferred = IGBUIO_MSIX_INTR_MODE;
		printk(KERN_INFO "Use MSIX interrupt\n");
	} else if (!strcmp(intr_str, "legacy")) {
		igbuio_intr_mode_preferred = IGBUIO_LEGACY_INTR_MODE;
		printk(KERN_INFO "Use legacy interrupt\n");
	} else {
		printk(KERN_INFO "Error: bad parameter - %s\n", intr_str);
		return -EINVAL;
	}

	return 0;
}

static struct pci_driver igbuio_pci_driver = {
	.name = "igb_uio",
	.id_table = igbuio_pci_ids,
	.probe = igbuio_pci_probe,
	.remove = igbuio_pci_remove,
};

static int __init
igbuio_pci_init_module(void)
{
	int ret;

	ret = igbuio_config_intr_mode(intr_mode);
	if (ret < 0)
		return ret;

	return pci_register_driver(&igbuio_pci_driver);
}

static void __exit
igbuio_pci_exit_module(void)
{
	pci_unregister_driver(&igbuio_pci_driver);
}

module_init(igbuio_pci_init_module);
module_exit(igbuio_pci_exit_module);

module_param(intr_mode, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(intr_mode,
"igb_uio interrupt mode (default=msix):\n"
"    msix       Use MSIX interrupt\n"
"    legacy     Use Legacy interrupt\n"
"\n");

MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");