/*-
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * Contact Information:
 * Intel Corporation
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/uio_driver.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/version.h>
#include <linux/slab.h>

#ifdef CONFIG_XEN_DOM0
#include <xen/xen.h>
#endif
#include <rte_pci_dev_features.h>

#include "compat.h"
/**
 * A structure describing the private information for a uio device.
 */
struct rte_uio_pci_dev {
	struct uio_info info;
	struct pci_dev *pdev;
	enum rte_intr_mode mode;
};

static char *intr_mode;
static enum rte_intr_mode igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
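
/*
 * The preferred mode can be overridden when loading the module, e.g.
 * "insmod igb_uio.ko intr_mode=legacy"; see the module_param() and
 * MODULE_PARM_DESC() declarations at the end of this file.
 */
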
/* sriov sysfs */
static ssize_t
show_max_vfs(struct device *dev, struct device_attribute *attr,
	     char *buf)
{
	return snprintf(buf, 10, "%u\n", dev_num_vf(dev));
}

static ssize_t
store_max_vfs(struct device *dev, struct device_attribute *attr,
	      const char *buf, size_t count)
{
	int err = 0;
	unsigned long max_vfs;
	struct pci_dev *pdev = to_pci_dev(dev);

	if (0 != kstrtoul(buf, 0, &max_vfs))
		return -EINVAL;

	if (0 == max_vfs)
		pci_disable_sriov(pdev);
	else if (0 == pci_num_vf(pdev))
		err = pci_enable_sriov(pdev, max_vfs);
	else /* do nothing if change max_vfs number */
		err = -EINVAL;

	return err ? err : count;
}

static DEVICE_ATTR(max_vfs, S_IRUGO | S_IWUSR, show_max_vfs, store_max_vfs);
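
/*
 * max_vfs appears in the sysfs directory of the bound PCI device; the
 * device address below is illustrative:
 *   echo 2 > /sys/bus/pci/devices/0000:02:00.0/max_vfs
 */
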
static struct attribute *dev_attrs[] = {
	&dev_attr_max_vfs.attr,
	NULL,
};

static const struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
/*
 * Mask or unmask generation of MSI-X messages for a single vector.
 */
static void
igbuio_msix_mask_irq(struct msi_desc *desc, int32_t state)
{
	u32 mask_bits = desc->masked;
	unsigned int offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;

	if (state != 0)
		mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	else
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;

	if (mask_bits != desc->masked) {
		writel(mask_bits, desc->mask_base + offset);
		readl(desc->mask_base);
		desc->masked = mask_bits;
	}
}
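
/*
 * Note: the writel() above pokes the Vector Control word of the device's
 * MSI-X table directly; the readl() flushes the posted write, and updating
 * desc->masked keeps the kernel's cached view of the mask bit in sync.
 */
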
/**
 * This is the irqcontrol callback to be registered to uio_info.
 * It can be used to disable/enable interrupts from user space processes.
 *
 * @param info
 *  pointer to uio_info.
 * @param irq_state
 *  state value. 1 to enable interrupt, 0 to disable interrupt.
 *
 * @return
 *  - On success, 0.
 *  - On failure, a negative value.
 */
static int
igbuio_pci_irqcontrol(struct uio_info *info, s32 irq_state)
{
	struct rte_uio_pci_dev *udev = info->priv;
	struct pci_dev *pdev = udev->pdev;

	pci_cfg_access_lock(pdev);
	if (udev->mode == RTE_INTR_MODE_LEGACY)
		pci_intx(pdev, !!irq_state);
	else if (udev->mode == RTE_INTR_MODE_MSIX) {
		struct msi_desc *desc;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
		list_for_each_entry(desc, &pdev->msi_list, list)
			igbuio_msix_mask_irq(desc, irq_state);
#else
		list_for_each_entry(desc, &pdev->dev.msi_list, list)
			igbuio_msix_mask_irq(desc, irq_state);
#endif
	}
	pci_cfg_access_unlock(pdev);

	return 0;
}
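
/*
 * User space reaches igbuio_pci_irqcontrol() by write()ing a 4-byte
 * integer (1 = enable, 0 = disable) to the /dev/uioX node; the UIO core
 * forwards that value here as irq_state.
 */
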
/**
 * This is the interrupt handler. It checks whether the interrupt belongs
 * to the right device; if so, the interrupt is disabled here and
 * re-enabled later from user space.
 */
static irqreturn_t
igbuio_pci_irqhandler(int irq, struct uio_info *info)
{
	struct rte_uio_pci_dev *udev = info->priv;

	/* Legacy mode needs to mask in hardware */
	if (udev->mode == RTE_INTR_MODE_LEGACY &&
	    !pci_check_and_mask_intx(udev->pdev))
		return IRQ_NONE;

	/* Message signalled mode: IRQ is not shared and is automasked */
	return IRQ_HANDLED;
}
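
/*
 * Returning IRQ_HANDLED makes the UIO core increment the event counter
 * and wake any process blocked in read() on /dev/uioX.
 */
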
#ifdef CONFIG_XEN_DOM0
static int
igbuio_dom0_mmap_phys(struct uio_info *info, struct vm_area_struct *vma)
{
	int idx;

	idx = (int)vma->vm_pgoff;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#ifdef HAVE_PTE_MASK_PAGE_IOMAP
	vma->vm_page_prot.pgprot |= _PAGE_IOMAP;
#endif

	return remap_pfn_range(vma,
			vma->vm_start,
			info->mem[idx].addr >> PAGE_SHIFT,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);
}

/**
 * This is the uio device mmap method, which uses the igbuio mmap above
 * for the Xen Dom0 environment.
 */
static int
igbuio_dom0_pci_mmap(struct uio_info *info, struct vm_area_struct *vma)
{
	int idx;

	if (vma->vm_pgoff >= MAX_UIO_MAPS)
		return -EINVAL;

	if (info->mem[vma->vm_pgoff].size == 0)
		return -EINVAL;

	idx = (int)vma->vm_pgoff;
	switch (info->mem[idx].memtype) {
	case UIO_MEM_PHYS:
		return igbuio_dom0_mmap_phys(info, vma);
	case UIO_MEM_LOGICAL:
	case UIO_MEM_VIRTUAL:
	default:
		return -EINVAL;
	}
}
#endif
/* Remap PCI resources described by BAR #pci_bar in uio resource n. */
static int
igbuio_pci_setup_iomem(struct pci_dev *dev, struct uio_info *info,
		       int n, int pci_bar, const char *name)
{
	unsigned long addr, len;
	void *internal_addr;

	if (n >= ARRAY_SIZE(info->mem))
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -1;
	internal_addr = ioremap(addr, len);
	if (internal_addr == NULL)
		return -1;
	info->mem[n].name = name;
	info->mem[n].addr = addr;
	info->mem[n].internal_addr = internal_addr;
	info->mem[n].size = len;
	info->mem[n].memtype = UIO_MEM_PHYS;
	return 0;
}
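
/*
 * User space maps uio memory region n by calling mmap() on /dev/uioX
 * with an offset of n times the system page size, per the UIO convention.
 */
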
/* Get PCI port I/O resources described by BAR #pci_bar in uio resource n. */
static int
igbuio_pci_setup_ioport(struct pci_dev *dev, struct uio_info *info,
		int n, int pci_bar, const char *name)
{
	unsigned long addr, len;

	if (n >= ARRAY_SIZE(info->port))
		return -EINVAL;

	addr = pci_resource_start(dev, pci_bar);
	len = pci_resource_len(dev, pci_bar);
	if (addr == 0 || len == 0)
		return -EINVAL;

	info->port[n].name = name;
	info->port[n].start = addr;
	info->port[n].size = len;
	info->port[n].porttype = UIO_PORT_X86;

	return 0;
}
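
/*
 * Port I/O regions cannot be mmap()ed; the UIO core instead publishes
 * them under /sys/class/uio/uioX/portio/ for user space to access with
 * the in/out instructions.
 */
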
/* Unmap previously ioremap'd resources */
static void
igbuio_pci_release_iomem(struct uio_info *info)
{
	int i;

	for (i = 0; i < MAX_UIO_MAPS; i++) {
		if (info->mem[i].internal_addr)
			iounmap(info->mem[i].internal_addr);
	}
}
static int
igbuio_setup_bars(struct pci_dev *dev, struct uio_info *info)
{
	int i, iom, iop, ret;
	unsigned long flags;
	static const char *bar_names[PCI_STD_RESOURCE_END + 1] = {
		"BAR0",
		"BAR1",
		"BAR2",
		"BAR3",
		"BAR4",
		"BAR5",
	};

	iom = 0;
	iop = 0;

	for (i = 0; i < ARRAY_SIZE(bar_names); i++) {
		if (pci_resource_len(dev, i) != 0 &&
				pci_resource_start(dev, i) != 0) {
			flags = pci_resource_flags(dev, i);
			if (flags & IORESOURCE_MEM) {
				ret = igbuio_pci_setup_iomem(dev, info, iom,
							     i, bar_names[i]);
				if (ret != 0)
					return ret;
				iom++;
			} else if (flags & IORESOURCE_IO) {
				ret = igbuio_pci_setup_ioport(dev, info, iop,
							      i, bar_names[i]);
				if (ret != 0)
					return ret;
				iop++;
			}
		}
	}

	/* any setup failure returned early above, so ret is 0 here */
	return (iom != 0 || iop != 0) ? ret : -ENOENT;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
#ifdef HAVE_PCI_ENABLE_MSIX
	struct msix_entry msix_entry;
#endif
	dma_addr_t map_dma_addr;
	void *map_addr;
	int err;

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}
	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.handler = igbuio_pci_irqhandler;
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
#ifdef CONFIG_XEN_DOM0
	/* check if the driver runs on Xen Dom0 */
	if (xen_initial_domain())
		udev->info.mmap = igbuio_dom0_pci_mmap;
#endif
	udev->info.priv = udev;
	udev->pdev = dev;
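
	/*
	 * The switch below deliberately falls through: if MSI-X cannot be
	 * enabled the driver degrades to legacy INTx, and if INTx masking
	 * is unsupported it degrades to running without an interrupt.
	 */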
	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
#ifdef HAVE_PCI_ENABLE_MSIX
		msix_entry.entry = 0;
		if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
			dev_dbg(&dev->dev, "using MSI-X");
			udev->info.irq_flags = IRQF_NO_THREAD;
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#else
		if (pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_MSIX) == 1) {
			dev_dbg(&dev->dev, "using MSI-X");
			udev->info.irq = pci_irq_vector(dev, 0);
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
#endif
		/* fall back to INTX */
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(dev)) {
			dev_dbg(&dev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED | IRQF_NO_THREAD;
			udev->info.irq = dev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&dev->dev, "PCI INTX mask not supported\n");
		/* fall back to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = 0;
		break;

	default:
		dev_err(&dev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		err = -EINVAL;
		goto fail_release_iomem;
	}
	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_remove_group;

	pci_set_drvdata(dev, udev);

	dev_info(&dev->dev, "uio device registered with irq %lx\n",
		 udev->info.irq);
	/*
	 * Doing a harmless dma mapping for attaching the device to
	 * the iommu identity mapping if kernel boots with iommu=pt.
	 * Note this is not a problem if no IOMMU at all.
	 */
	map_addr = dma_alloc_coherent(&dev->dev, 1024, &map_dma_addr,
			GFP_KERNEL);
	if (map_addr)
		memset(map_addr, 0, 1024);

	if (!map_addr)
		dev_info(&dev->dev, "dma mapping failed\n");
	else {
		dev_info(&dev->dev, "mapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);

		dma_free_coherent(&dev->dev, 1024, map_addr, map_dma_addr);
		dev_info(&dev->dev, "unmapping 1K dma=%#llx host=%p\n",
			 (unsigned long long)map_dma_addr, map_addr);
	}

	return 0;

fail_remove_group:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return err;
}
static void
igbuio_pci_remove(struct pci_dev *dev)
{
	struct rte_uio_pci_dev *udev = pci_get_drvdata(dev);

	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
	uio_unregister_device(&udev->info);
	igbuio_pci_release_iomem(&udev->info);
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(dev);
	pci_disable_device(dev);
	pci_set_drvdata(dev, NULL);
	kfree(udev);
}
static int
igbuio_config_intr_mode(char *intr_str)
{
	if (!intr_str) {
		pr_info("Use MSIX interrupt by default\n");
		return 0;
	}

	if (!strcmp(intr_str, RTE_INTR_MODE_MSIX_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_MSIX;
		pr_info("Use MSIX interrupt\n");
	} else if (!strcmp(intr_str, RTE_INTR_MODE_LEGACY_NAME)) {
		igbuio_intr_mode_preferred = RTE_INTR_MODE_LEGACY;
		pr_info("Use legacy interrupt\n");
	} else {
		pr_info("Error: bad parameter - %s\n", intr_str);
		return -EINVAL;
	}

	return 0;
}
static struct pci_driver igbuio_pci_driver = {
	.name = "igb_uio",
	.id_table = NULL, /* devices are bound explicitly via sysfs (new_id/bind) */
	.probe = igbuio_pci_probe,
	.remove = igbuio_pci_remove,
};
static int __init
igbuio_pci_init_module(void)
{
	int ret;

	ret = igbuio_config_intr_mode(intr_mode);
	if (ret < 0)
		return ret;

	return pci_register_driver(&igbuio_pci_driver);
}

static void __exit
igbuio_pci_exit_module(void)
{
	pci_unregister_driver(&igbuio_pci_driver);
}
module_init(igbuio_pci_init_module);
module_exit(igbuio_pci_exit_module);

module_param(intr_mode, charp, S_IRUGO);
MODULE_PARM_DESC(intr_mode,
"igb_uio interrupt mode (default=msix):\n"
"    " RTE_INTR_MODE_MSIX_NAME "       Use MSIX interrupt\n"
"    " RTE_INTR_MODE_LEGACY_NAME "     Use Legacy interrupt\n"
"\n");

MODULE_DESCRIPTION("UIO driver for Intel IGB PCI cards");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");