/*-
 * GPL LICENSE SUMMARY
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of version 2 of the GNU General Public License as
 *   published by the Free Software Foundation.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *   General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *   The full GNU General Public License is included in this distribution
 *   in the file called LICENSE.GPL.
 *
 *   Contact Information:
 *   Intel Corporation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/uio_driver.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/version.h>

#ifdef CONFIG_XEN_DOM0
#include <xen/xen.h>
#endif

#include <rte_pci_dev_features.h>
/**
 * MSI-X related macros, copied from linux/pci_regs.h in kernel 2.6.39;
 * none of them are present in kernel 2.6.35.
 */
#ifndef PCI_MSIX_ENTRY_SIZE
#define PCI_MSIX_ENTRY_SIZE            16
#define PCI_MSIX_ENTRY_LOWER_ADDR      0
#define PCI_MSIX_ENTRY_UPPER_ADDR      4
#define PCI_MSIX_ENTRY_DATA            8
#define PCI_MSIX_ENTRY_VECTOR_CTRL     12
#define PCI_MSIX_ENTRY_CTRL_MASKBIT    1
#endif
#define PCI_SYS_FILE_BUF_SIZE      10
#define PCI_DEV_CAP_REG            0xA4
#define PCI_DEV_CTRL_REG           0xA8
#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)

#define IGBUIO_NUM_MSI_VECTORS 1
/**
 * A structure describing the private information for a uio device.
 */
struct rte_uio_pci_dev {
	struct uio_info info;
	struct pci_dev *pdev;
	spinlock_t lock; /* protects PCI config space and MSI-X data shared between task and interrupt context */
	enum rte_intr_mode mode;
	struct msix_entry msix_entries[IGBUIO_NUM_MSI_VECTORS]; /* MSI-X vector table, filled in when MSI-X is enabled */
};
static char *intr_mode = NULL;
static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;

/* Map a uio_info pointer back to its enclosing rte_uio_pci_dev. */
static inline struct rte_uio_pci_dev *
igbuio_get_uio_pci_dev(struct uio_info *info)
{
	return container_of(info, struct rte_uio_pci_dev, info);
}
/* SR-IOV helper: pci_num_vf() is only available from kernel 2.6.34 on. */
static int
local_pci_num_vf(struct pci_dev *dev)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 34)
	/* mirrors the head of the kernel's private struct pci_sriov */
	struct iov {
		int pos;
		int nres;
		u32 cap;
		u16 ctrl;
		u16 total;
		u16 initial;
		u16 nr_virtfn;
	} *iov = (struct iov *)dev->sriov;

	if (!dev->is_physfn)
		return 0;

	return iov->nr_virtfn;
#else
	return pci_num_vf(dev);
#endif
}
static ssize_t
show_max_vfs(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%u\n", local_pci_num_vf(
			container_of(dev, struct pci_dev, dev)));
}
static ssize_t
store_max_vfs(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	int err = 0;
	unsigned long max_vfs;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);

	if (0 != strict_strtoul(buf, 0, &max_vfs))
		return -EINVAL;

	if (0 == max_vfs)
		pci_disable_sriov(pdev);
	else if (0 == local_pci_num_vf(pdev))
		err = pci_enable_sriov(pdev, max_vfs);
	else /* changing the VF count while VFs are enabled is not allowed */
		err = -EINVAL;

	return err ? err : count;
}
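/*
 * Illustrative use of the max_vfs attribute from user space (the PCI address
 * below is an example; substitute the address of a device bound to igb_uio):
 *   echo 2 > /sys/bus/pci/devices/0000:01:00.0/max_vfs   # enable two VFs
 *   echo 0 > /sys/bus/pci/devices/0000:01:00.0/max_vfs   # disable SR-IOV
 */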
#ifdef RTE_PCI_CONFIG
static ssize_t
show_extended_tag(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
	uint32_t val = 0;

	pci_read_config_dword(pci_dev, PCI_DEV_CAP_REG, &val);
	if (!(val & PCI_DEV_CAP_EXT_TAG_MASK)) /* not supported */
		return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%s\n", "invalid");

	val = 0;
	pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
				  PCI_DEV_CTRL_REG, &val);

	return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%s\n",
			(val & PCI_DEV_CTRL_EXT_TAG_MASK) ? "on" : "off");
}
static ssize_t
store_extended_tag(struct device *dev,
		   struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
	uint32_t val = 0, enable;

	if (strncmp(buf, "on", 2) == 0)
		enable = 1;
	else if (strncmp(buf, "off", 3) == 0)
		enable = 0;
	else
		return -EINVAL;

	pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
				  PCI_DEV_CAP_REG, &val);
	if (!(val & PCI_DEV_CAP_EXT_TAG_MASK)) /* not supported */
		return -EPERM;

	val = 0;
	pci_bus_read_config_dword(pci_dev->bus, pci_dev->devfn,
				  PCI_DEV_CTRL_REG, &val);
	if (enable)
		val |= PCI_DEV_CTRL_EXT_TAG_MASK;
	else
		val &= ~PCI_DEV_CTRL_EXT_TAG_MASK;
	pci_bus_write_config_dword(pci_dev->bus, pci_dev->devfn,
				   PCI_DEV_CTRL_REG, val);

	return count;
}
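/*
 * Illustrative use of the extended_tag attribute (example PCI address):
 *   cat /sys/bus/pci/devices/0000:01:00.0/extended_tag    # "on", "off" or "invalid"
 *   echo on > /sys/bus/pci/devices/0000:01:00.0/extended_tag
 */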
static ssize_t
show_max_read_request_size(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
	int val = pcie_get_readrq(pci_dev);

	return snprintf(buf, PCI_SYS_FILE_BUF_SIZE, "%d\n", val);
}

static ssize_t
store_max_read_request_size(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct pci_dev *pci_dev = container_of(dev, struct pci_dev, dev);
	unsigned long size = 0;
	int ret;

	if (strict_strtoul(buf, 0, &size) != 0)
		return -EINVAL;

	ret = pcie_set_readrq(pci_dev, (int)size);
	if (ret < 0)
		return ret;

	return count;
}
#endif
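/*
 * Note on max_read_request_size: the value written is handed straight to
 * pcie_set_readrq(), which only accepts power-of-two sizes between 128 and
 * 4096 bytes and rejects anything else with -EINVAL.
 */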
static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);
#ifdef RTE_PCI_CONFIG
static DEVICE_ATTR(extended_tag, S_IRUGO | S_IWUSR, show_extended_tag,
		   store_extended_tag);
static DEVICE_ATTR(max_read_request_size, S_IRUGO | S_IWUSR,
		   show_max_read_request_size, store_max_read_request_size);
#endif
static struct attribute *dev_attrs[] = {
	&dev_attr_max_vfs.attr,
#ifdef RTE_PCI_CONFIG
	&dev_attr_extended_tag.attr,
	&dev_attr_max_read_request_size.attr,
#endif
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
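/*
 * The attribute group above is registered on the PCI device's kobject in
 * igbuio_pci_probe(), so these files show up under
 * /sys/bus/pci/devices/<pci address>/ once a device is bound to igb_uio.
 */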
/* Take the PCI config access lock; returns nonzero on success. */
static inline int
pci_lock(struct pci_dev *pdev)
{
	/* Some function names changed between kernels 3.2.0 and 3.3.0... */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
	pci_block_user_cfg_access(pdev);
	return 1;
#else
	return pci_cfg_access_trylock(pdev);
#endif
}

/* Release the PCI config access lock taken by pci_lock(). */
static inline void
pci_unlock(struct pci_dev *pdev)
{
	/* Some function names changed between kernels 3.2.0 and 3.3.0... */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
	pci_unblock_user_cfg_access(pdev);
#else
	pci_cfg_access_unlock(pdev);
#endif
}
/**
 * Mask or unmask generation of MSI-X messages for a single vector.
 */
static void
igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state)
{
	uint32_t mask_bits = desc->masked;
	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
			  PCI_MSIX_ENTRY_VECTOR_CTRL;

	if (state != 0)
		mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	else
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;

	if (mask_bits != desc->masked) {
		writel(mask_bits, desc->mask_base + offset);
		readl(desc->mask_base);
		desc->masked = mask_bits;
	}
}
/**
 * Set or clear the masks that control generation of LSC interrupts.
 *
 * @param udev
 *   Pointer to the uio device's private data.
 * @param state
 *   Nonzero to unmask (enable) interrupts, zero to mask (disable) them.
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
igbuio_set_interrupt_mask(struct rte_uio_pci_dev *udev, int32_t state)
{
	struct pci_dev *pdev = udev->pdev;

	if (udev->mode == RTE_INTR_MODE_MSIX) {
		struct msi_desc *desc;

		list_for_each_entry(desc, &pdev->msi_list, list) {
			igbuio_msix_mask_irq(desc, state);
		}
	} else if (udev->mode == RTE_INTR_MODE_LEGACY) {
		uint32_t status;
		uint16_t old, new;

		pci_read_config_dword(pdev, PCI_COMMAND, &status);
		old = status;
		if (state != 0)
			new = old & (~PCI_COMMAND_INTX_DISABLE);
		else
			new = old | PCI_COMMAND_INTX_DISABLE;

		if (new != old)
			pci_write_config_word(pdev, PCI_COMMAND, new);
	}

	return 0;
}
/**
 * This is the irqcontrol callback registered with uio_info. It is invoked
 * from user space to enable or disable the device interrupt.
 *
 * @param info
 *   Pointer to uio_info.
 * @param irq_state
 *   1 to enable the interrupt, 0 to disable it.
 * @return
 *   - On success, 0.
 *   - On failure, a negative value.
 */
static int
igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
{
	unsigned long flags;
	struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
	struct pci_dev *pdev = udev->pdev;

	spin_lock_irqsave(&udev->lock, flags);
	if (!pci_lock(pdev)) {
		spin_unlock_irqrestore(&udev->lock, flags);
		return -1;
	}

	igbuio_set_interrupt_mask(udev, irq_state);

	pci_unlock(pdev);
	spin_unlock_irqrestore(&udev->lock, flags);

	return 0;
}
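/*
 * User space reaches the callback above by write()-ing a 4-byte integer to
 * the /dev/uioX device file: the UIO core passes that value to irqcontrol(),
 * so writing 1 unmasks the interrupt and writing 0 masks it. DPDK's EAL
 * typically writes 1 after each serviced interrupt to re-arm the device.
 */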
/**
 * Interrupt handler. Checks whether the interrupt belongs to this device;
 * if it does, the interrupt is masked here and re-enabled later from user
 * space via the irqcontrol callback.
 */
static irqreturn_t
igbuio_pci_irqhandler(int irq, struct uio_info *info)
{
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;
	struct rte_uio_pci_dev *udev = igbuio_get_uio_pci_dev(info);
	struct pci_dev *pdev = udev->pdev;
	uint32_t cmd_status_dword;
	uint16_t status;

	spin_lock_irqsave(&udev->lock, flags);
	/* block userspace PCI config reads/writes */
	if (!pci_lock(pdev))
		goto spin_unlock;

	/* in legacy mode the interrupt line may be shared */
	if (udev->mode == RTE_INTR_MODE_LEGACY) {
		pci_read_config_dword(pdev, PCI_COMMAND, &cmd_status_dword);
		status = cmd_status_dword >> 16;
		/* the interrupt is not ours, bail out */
		if (!(status & PCI_STATUS_INTERRUPT))
			goto done;
	}

	igbuio_set_interrupt_mask(udev, 0);
	ret = IRQ_HANDLED;
done:
	/* unblock userspace PCI config reads/writes */
	pci_unlock(pdev);
spin_unlock:
	spin_unlock_irqrestore(&udev->lock, flags);
	pr_info("irq 0x%x %s\n", irq, (ret == IRQ_HANDLED) ? "handled" : "not handled");

	return ret;
}
#ifdef CONFIG_XEN_DOM0
static int
igbuio_dom0_mmap_phys(struct uio_info *info, struct vm_area_struct *vma)
{
	int idx;

	idx = (int)vma->vm_pgoff;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_page_prot.pgprot |= _PAGE_IOMAP;

	return remap_pfn_range(vma,
			vma->vm_start,
			info->mem[idx].addr >> PAGE_SHIFT,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}
/**
 * uio device mmap method, used instead of the default UIO mmap when the
 * driver runs in a Xen Dom0 environment.
 */
static int
igbuio_dom0_pci_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	int idx;

	if (vma->vm_pgoff >= MAX_UIO_MAPS)
		return -EINVAL;

	if (info->mem[vma->vm_pgoff].size == 0)
		return -EINVAL;

	idx = (int)vma->vm_pgoff;
	switch (info->mem[idx].memtype) {
	case UIO_MEM_PHYS:
		return igbuio_dom0_mmap_phys(info, vma);
	case UIO_MEM_LOGICAL:
	case UIO_MEM_VIRTUAL:
	default:
		return -EINVAL;
	}
}
#endif
/* Remap PCI resources described by BAR #pci_bar into uio resource n. */
static int
igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
		       int n, int pci_bar, const char *name)
{
	unsigned long addr, len;
	void *internal_addr;

	if (sizeof(info->mem) / sizeof(info->mem[0]) <= n)
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -1;
	internal_addr = ioremap(addr, len);
	if (internal_addr == NULL)
		return -1;
	info->mem[n].name = name;
	info->mem[n].addr = addr;
	info->mem[n].internal_addr = internal_addr;
	info->mem[n].size = len;
	info->mem[n].memtype = UIO_MEM_PHYS;
	return 0;
}
/* Get PCI port I/O resources described by BAR #pci_bar into uio resource n. */
static int
igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
			int n, int pci_bar, const char *name)
{
	unsigned long addr, len;

	if (sizeof(info->port) / sizeof(info->port[0]) <= n)
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -EINVAL;

	info->port[n].name = name;
	info->port[n].start = addr;
	info->port[n].size = len;
	info->port[n].porttype = UIO_PORT_X86;

	return 0;
}
/* Unmap previously ioremap'd resources */
static void
igbuio_pci_release_iomem(struct uio_info *info)
{
	int i;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		if (info->mem[i].internal_addr)
			iounmap(info->mem[i].internal_addr);
	}
}
/* Walk the standard BARs and expose each populated one as a uio mapping. */
static int
igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
{
	int i, iom, iop, ret;
	unsigned long flags;
	static const char *bar_names[PCI_STD_RESOURCE_END + 1] = {
		"BAR0", "BAR1", "BAR2", "BAR3", "BAR4", "BAR5",
	};

	iom = 0;
	iop = 0;
	ret = 0;

	for (i = 0; i != sizeof(bar_names) / sizeof(bar_names[0]); i++) {
		if (pci_resource_len(dev, i) != 0 &&
				pci_resource_start(dev, i) != 0) {
			flags = pci_resource_flags(dev, i);
			if (flags & IORESOURCE_MEM) {
				ret = igbuio_pci_setup_iomem(dev, info, iom,
							     i, bar_names[i]);
				if (ret != 0)
					return ret;
				iom++;
			} else if (flags & IORESOURCE_IO) {
				ret = igbuio_pci_setup_ioport(dev, info, iop,
							      i, bar_names[i]);
				if (ret != 0)
					return ret;
				iop++;
			}
		}
	}

	return (iom != 0) ? ret : -ENOENT;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	if (pci_enable_device(dev)) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/*
	 * reserve device's PCI memory regions for use by this
	 * module
	 */
	if (pci_request_regions(dev, "igb_uio")) {
		dev_err(&dev->dev, "Cannot request regions\n");
		goto fail_disable;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	if (igbuio_setup_bars(dev, &udev->info))
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	if (pci_set_dma_mask(dev, DMA_BIT_MASK(64))) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	} else if (pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64))) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill in the uio_info structure */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.handler = igbuio_pci_irqhandler;
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
#ifdef CONFIG_XEN_DOM0
	/* check whether the driver runs on Xen Dom0 */
	if (xen_initial_domain())
		udev->info.mmap = igbuio_dom0_pci_mmap;
#endif
	udev->info.priv = udev;
	udev->pdev = dev;
	udev->mode = RTE_INTR_MODE_LEGACY;
	spin_lock_init(&udev->lock);

	/* check whether MSI-X should be tried first */
	if (igbuio_intr_mode_preferred == RTE_INTR_MODE_MSIX) {
		int vector;

		for (vector = 0; vector < IGBUIO_NUM_MSI_VECTORS; vector++)
			udev->msix_entries[vector].entry = vector;

		if (pci_enable_msix(udev->pdev, udev->msix_entries,
				IGBUIO_NUM_MSI_VECTORS) == 0) {
			udev->mode = RTE_INTR_MODE_MSIX;
		} else {
			pci_disable_msix(udev->pdev);
			pr_info("failed to enable MSI-X, or not enough MSI-X entries\n");
		}
	}

	switch (udev->mode) {
	case RTE_INTR_MODE_MSIX:
		udev->info.irq_flags = 0;
		udev->info.irq = udev->msix_entries[0].vector;
		break;
	case RTE_INTR_MODE_MSI:
		break;
	case RTE_INTR_MODE_LEGACY:
		udev->info.irq_flags = IRQF_SHARED;
		udev->info.irq = dev->irq;
		break;
	default:
		break;
	}

	pci_set_drvdata(dev, udev);
	igbuio_pci_irqcontrol(&udev->info, 0);

	if (sysfs_create_group(&dev->dev.kobj, &dev_attr_grp))
		goto fail_release_iomem;

	/* register uio driver */
	if (uio_register_device(&dev->dev, &udev->info))
		goto fail_release_iomem;

	pr_info("uio device registered with irq %lx\n", udev->info.irq);

	return 0;

fail_release_iomem:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	igbuio_pci_release_iomem(&udev->info);
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	pci_release_regions(dev);
fail_disable:
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return -ENODEV;
}
static void
igbuio_pci_remove(struct pci_dev *dev)
{
	struct uio_info *info = pci_get_drvdata(dev);

	if (info->priv == NULL) {
		pr_notice("Not igbuio device\n");
		return;
	}

	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	uio_unregister_device(info);
	igbuio_pci_release_iomem(info);
	if (((struct rte_uio_pci_dev *)info->priv)->mode ==
			RTE_INTR_MODE_MSIX)
		pci_disable_msix(dev);
	pci_release_regions(dev);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(info);
}
static int
igbuio_config_intr_mode(char *intr_str)
{
	if (!intr_str) {
		pr_info("Use MSIX interrupt by default\n");
		return 0;
	}

	/* check which mode was requested */
	if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
		pr_info("Use MSIX interrupt\n");
	} else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
		pr_info("Use legacy interrupt\n");
	} else {
		pr_info("Error: bad parameter - %s\n", intr_str);
		return -EINVAL;
	}

	return 0;
}
static struct pci_driver igbuio_pci_driver = {
	.name = "igb_uio",
	.id_table = NULL, /* devices are bound dynamically via sysfs new_id */
	.probe = igbuio_pci_probe,
	.remove = igbuio_pci_remove,
};
static int __init
igbuio_pci_init_module(void)
{
	int ret;

	ret = igbuio_config_intr_mode(intr_mode);
	if (ret < 0)
		return ret;

	return pci_register_driver(&igbuio_pci_driver);
}

static void __exit
igbuio_pci_exit_module(void)
{
	pci_unregister_driver(&igbuio_pci_driver);
}

module_init(igbuio_pci_init_module);
module_exit(igbuio_pci_exit_module);
module_param(intr_mode, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(intr_mode,
"igb_uio interrupt mode (default=msix):\n"
"    " RTE_INTR_MODE_MSIX_NAME "       Use MSIX interrupt\n"
"    " RTE_INTR_MODE_LEGACY_NAME "     Use Legacy interrupt\n"
"");
MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");