4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include <sys/types.h>
43 #include <rte_bus_pci.h>
44 #include <rte_tailq.h>
46 #include <rte_malloc.h>
/*
 * Tail-queue element for the list of UIO-mapped PCI resources.
 * The list is looked up by name via RTE_TAILQ_CAST() in the functions
 * below: the primary process inserts entries (pci_uio_map_resource)
 * and secondary processes walk it to re-create mappings
 * (pci_uio_map_secondary).
 * NOTE(review): the closing "};" of this initializer is not visible in
 * this fragment — the extraction appears to have dropped lines.
 */
50 static struct rte_tailq_elem rte_uio_tailq = {
51 .name = "UIO_RESOURCE_LIST",
53 EAL_REGISTER_TAILQ(rte_uio_tailq)
/*
 * pci_uio_map_secondary() - in a secondary process, re-create the
 * mappings that the primary process recorded in the UIO resource list.
 *
 * Walks the shared list for the entry whose pci_addr matches
 * dev->addr, then for each recorded map: re-opens maps[i].path and
 * mmaps it at the exact virtual address the primary used
 * (maps[i].addr). pci_map_resource() returning any other address is
 * treated as failure and all mappings made so far are rolled back.
 *
 * NOTE(review): this fragment is missing several original lines
 * (declarations of fd/i/j, the 'continue' after the address compare,
 * close(fd), return statements, closing braces). Comments below
 * describe only what the visible lines show.
 */
56 pci_uio_map_secondary(struct rte_pci_device *dev)
59 struct mapped_pci_resource *uio_res;
60 struct mapped_pci_res_list *uio_res_list =
61 RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
63 TAILQ_FOREACH(uio_res, uio_res_list, next) {
65 /* skip this element if it doesn't match our PCI address */
66 if (pci_addr_cmp(&uio_res->pci_addr, &dev->addr))
	/* matched: re-map every segment recorded by the primary */
69 for (i = 0; i != uio_res->nb_maps; i++) {
71 * open devname, to mmap it
73 fd = open(uio_res->maps[i].path, O_RDWR);
75 RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
76 uio_res->maps[i].path, strerror(errno));
	/*
	 * Map at the address recorded by the primary. Last argument 0:
	 * presumably "no additional mmap flags" — confirm against the
	 * pci_map_resource() prototype.
	 */
80 void *mapaddr = pci_map_resource(uio_res->maps[i].addr,
81 fd, (off_t)uio_res->maps[i].offset,
82 (size_t)uio_res->maps[i].size, 0);
83 /* fd is not needed in slave process, close it */
	/*
	 * The mapping MUST land at the recorded address; anything else
	 * (including MAP_FAILED) is an error.
	 */
85 if (mapaddr != uio_res->maps[i].addr) {
87 "Cannot mmap device resource file %s to address: %p\n",
88 uio_res->maps[i].path,
89 uio_res->maps[i].addr);
90 if (mapaddr != MAP_FAILED) {
91 /* unmap addrs correctly mapped */
92 for (j = 0; j < i; j++)
94 uio_res->maps[j].addr,
95 (size_t)uio_res->maps[j].size);
96 /* unmap addr wrongly mapped */
97 pci_unmap_resource(mapaddr,
98 (size_t)uio_res->maps[i].size);
/* no matching list entry was found for this device */
106 RTE_LOG(ERR, EAL, "Cannot find resource for device\n");
/*
 * pci_uio_map_resource() - map all memory resources (BARs) of @dev.
 *
 * Primary process: allocates a mapped_pci_resource via
 * pci_uio_alloc_resource(), maps each resource index in
 * [0, PCI_MAX_RESOURCE) with pci_uio_map_resource_by_index(), records
 * the number of successful maps in uio_res->nb_maps and appends the
 * entry to the shared UIO resource list for secondaries to find.
 * Secondary process: delegates entirely to pci_uio_map_secondary().
 *
 * The interrupt handle fds are reset to -1/UNKNOWN up front so the
 * error path leaves a consistent state.
 *
 * NOTE(review): lines are missing from this fragment (the ret checks
 * after pci_uio_alloc_resource()/map_resource_by_index(), the skip of
 * zero phys_addr resources, the trailing arguments of the
 * map_resource_by_index() call, the 'return ret' / error label, and
 * closing braces). Comments reflect only the visible code.
 */
110 /* map the PCI resource of a PCI device in virtual memory */
112 pci_uio_map_resource(struct rte_pci_device *dev)
114 int i, map_idx = 0, ret;
116 struct mapped_pci_resource *uio_res = NULL;
117 struct mapped_pci_res_list *uio_res_list =
118 RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
	/* start from a clean interrupt-handle state */
120 dev->intr_handle.fd = -1;
121 dev->intr_handle.uio_cfg_fd = -1;
122 dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
124 /* secondary processes - use already recorded details */
125 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
126 return pci_uio_map_secondary(dev);
128 /* allocate uio resource */
129 ret = pci_uio_alloc_resource(dev, &uio_res);
	/* map every BAR that has a physical address backing it */
134 for (i = 0; i != PCI_MAX_RESOURCE; i++) {
136 phaddr = dev->mem_resource[i].phys_addr;
140 ret = pci_uio_map_resource_by_index(dev, i,
	/* publish the entry so secondary processes can replay the maps */
148 uio_res->nb_maps = map_idx;
150 TAILQ_INSERT_TAIL(uio_res_list, uio_res, next);
	/* error path: undo the map_idx mappings made so far */
154 for (i = 0; i < map_idx; i++) {
155 pci_unmap_resource(uio_res->maps[i].addr,
156 (size_t)uio_res->maps[i].size);
157 rte_free(uio_res->maps[i].path);
159 pci_uio_free_resource(dev, uio_res);
/*
 * pci_uio_unmap() - unmap every recorded mapping of @uio_res.
 *
 * The per-map path string is freed only in the primary process —
 * presumably because only the primary allocated it (the error path in
 * pci_uio_map_resource() also rte_free()s it in primary context);
 * confirm against pci_uio_map_resource_by_index().
 *
 * NOTE(review): the function's opening lines (return type, a possible
 * NULL check on uio_res, variable declaration of i) and closing braces
 * are missing from this fragment.
 */
164 pci_uio_unmap(struct mapped_pci_resource *uio_res)
171 for (i = 0; i != uio_res->nb_maps; i++) {
172 pci_unmap_resource(uio_res->maps[i].addr,
173 (size_t)uio_res->maps[i].size);
174 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
175 rte_free(uio_res->maps[i].path);
/*
 * pci_uio_find_resource() - look up the UIO resource-list entry whose
 * PCI address equals dev->addr; the branch body and return statements
 * are not visible in this fragment (presumably "return uio_res;" on a
 * match and "return NULL;" after the loop — TODO confirm).
 *
 * NOTE(review): the visible condition is the INVERSE of the one in
 * pci_uio_map_secondary(): here !pci_addr_cmp() is true on an address
 * match (assuming pci_addr_cmp() returns 0 on equality — confirm), so
 * the inherited "skip ... if it doesn't match" comment below is stale
 * for this function; the branch fires when the element DOES match.
 */
179 static struct mapped_pci_resource *
180 pci_uio_find_resource(struct rte_pci_device *dev)
182 struct mapped_pci_resource *uio_res;
183 struct mapped_pci_res_list *uio_res_list =
184 RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
189 TAILQ_FOREACH(uio_res, uio_res_list, next) {
191 /* skip this element if it doesn't match our PCI address */
192 if (!pci_addr_cmp(&uio_res->pci_addr, &dev->addr))
/*
 * pci_uio_unmap_resource() - tear down the UIO mappings of @dev.
 *
 * Looks the device up in the shared list; a secondary process only
 * unmaps its own view (pci_uio_unmap() skips freeing paths there),
 * while the primary additionally removes the entry from the list,
 * closes the interrupt-handle fds and resets the handle state.
 *
 * NOTE(review): missing from this fragment: the function's return
 * type, the NULL check after pci_uio_find_resource(), the
 * rte_free(uio_res) under "free uio resource", and closing braces.
 * Also note the asymmetry visible below: dev->intr_handle.fd is
 * close()d without a >= 0 guard while uio_cfg_fd is guarded — close(-1)
 * merely fails with EBADF, but confirm whether a guard was dropped by
 * the extraction or is genuinely absent.
 */
198 /* unmap the PCI resource of a PCI device in virtual memory */
200 pci_uio_unmap_resource(struct rte_pci_device *dev)
202 struct mapped_pci_resource *uio_res;
203 struct mapped_pci_res_list *uio_res_list =
204 RTE_TAILQ_CAST(rte_uio_tailq.head, mapped_pci_res_list);
209 /* find an entry for the device */
210 uio_res = pci_uio_find_resource(dev);
214 /* secondary processes - just free maps */
215 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
216 return pci_uio_unmap(uio_res);
	/* primary: unpublish the entry before tearing it down */
218 TAILQ_REMOVE(uio_res_list, uio_res, next);
220 /* unmap all resources */
221 pci_uio_unmap(uio_res);
223 /* free uio resource */
226 /* close fd if in primary process */
227 close(dev->intr_handle.fd);
228 if (dev->intr_handle.uio_cfg_fd >= 0) {
229 close(dev->intr_handle.uio_cfg_fd);
230 dev->intr_handle.uio_cfg_fd = -1;
	/* reset the handle so the device can be re-mapped later */
233 dev->intr_handle.fd = -1;
234 dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;