6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of NXP nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
40 #include <sys/types.h>
41 #include <sys/syscall.h>
43 #include <rte_config.h>
44 #include <rte_byteorder.h>
45 #include <rte_common.h>
46 #include <rte_interrupts.h>
48 #include <rte_debug.h>
50 #include <rte_atomic.h>
51 #include <rte_branch_prediction.h>
52 #include <rte_memory.h>
53 #include <rte_memzone.h>
54 #include <rte_tailq.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
63 #include <rte_dpaa_bus.h>
64 #include <rte_dpaa_logs.h>
/* Log type for DPAA mempool messages (registered in dpaa_init_log below). */
73 int dpaa_logtype_mempool;
/* The singleton DPAA bus object; its device/driver lists are initialized
 * in the designated initializer near the bottom of this file. */
76 struct rte_dpaa_bus rte_dpaa_bus;
/* Network configuration acquired from the device tree during bus scan. */
77 struct netcfg_info *dpaa_netcfg;
79 /* define a variable to hold the portal_key, once created.*/
80 pthread_key_t dpaa_portal_key;
/* Per-lcore flag: set true once this thread's QMAN/BMAN portals are set up. */
82 RTE_DEFINE_PER_LCORE(bool, _dpaa_io);
/* Append a newly discovered DPAA device to the bus's global device list. */
85 dpaa_add_to_device_list(struct rte_dpaa_device *dev)
87 TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, dev, next);
/* Unlink a DPAA device from the bus's global device list.
 * FIX: the original body called TAILQ_INSERT_TAIL here — i.e. a "remove"
 * helper that re-inserted the node, duplicating the queue entry and
 * corrupting the list on the next traversal. A removal must use
 * TAILQ_REMOVE, mirroring dpaa_add_to_device_list above. */
91 dpaa_remove_from_device_list(struct rte_dpaa_device *dev)
93 TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
/* Forward declaration: needed for the failure-cleanup path below. */
96 static void dpaa_clean_device_list(void);
/* Enumerate the FMAN ethernet ports described by dpaa_netcfg and create one
 * rte_dpaa_device per port, filling in its identifiers and name and linking
 * it onto the bus device list. On allocation failure, already-created
 * devices are torn down via dpaa_clean_device_list(). */
99 dpaa_create_device_list(void)
103 struct rte_dpaa_device *dev;
104 struct fm_eth_port_cfg *cfg;
105 struct fman_if *fman_intf;
107 /* Creating Ethernet Devices */
108 for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
109 dev = calloc(1, sizeof(struct rte_dpaa_device));
111 DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
116 cfg = &dpaa_netcfg->port_cfg[i];
117 fman_intf = cfg->fman_if;
119 /* Device identifiers */
/* NOTE(review): fman_id is fman_idx + 1 — apparently one-based while the
 * netcfg index is zero-based; confirm against the FMAN driver. */
120 dev->id.fman_id = fman_intf->fman_idx + 1;
121 dev->id.mac_id = fman_intf->mac_idx;
122 dev->device_type = FSL_DPAA_ETH;
125 /* Create device name */
126 memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
/* Name format "fm<N>-mac<M>"; the same +1 offset is applied to fman_idx. */
127 sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
129 DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
130 dev->device.name = dev->name;
132 dpaa_add_to_device_list(dev);
/* Record how many devices were successfully created. */
135 rte_dpaa_bus.device_count = i;
/* Failure path: undo any devices created so far. */
140 dpaa_clean_device_list();
/* Unlink every device from the bus list (freeing is presumably done just
 * after the REMOVE — that line is outside this view). The _SAFE iterator
 * permits removal of the current node during traversal. */
145 dpaa_clean_device_list(void)
147 struct rte_dpaa_device *dev = NULL;
148 struct rte_dpaa_device *tdev = NULL;
150 TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
151 TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
157 /** XXX move this function into a separate file */
/* Per-thread portal setup: pin the calling thread to a CPU, initialize its
 * BMAN and QMAN software portals, record the portal bookkeeping in
 * thread-specific storage (so the dpaa_portal_key destructor can tear it
 * down on thread exit), and mark the lcore as initialized.
 * FIX: the qman_thread_init() error path logged "bman_thread_init failed"
 * — a copy/paste slip from the bman path above; corrected to name the
 * function that actually failed. */
159 _dpaa_portal_init(void *arg)
163 uint32_t cpu = rte_lcore_id();
165 struct dpaa_portal *dpaa_io_portal;
167 BUS_INIT_FUNC_TRACE();
/* arg == 1 is a sentinel meaning "use the master lcore"; also taken when
 * the caller is a non-EAL thread (lcore id LCORE_ID_ANY). */
169 if ((uint64_t)arg == 1 || cpu == LCORE_ID_ANY)
170 cpu = rte_get_master_lcore();
171 /* if the core id is not supported */
173 if (cpu >= RTE_MAX_LCORE)
176 /* Set CPU affinity for this thread */
178 CPU_SET(cpu, &cpuset);
180 ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
182 DPAA_BUS_LOG(ERR, "pthread_setaffinity_np failed on "
183 "core :%d with ret: %d", cpu, ret);
187 /* Initialise bman thread portals */
188 ret = bman_thread_init();
190 DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
191 "core %d with ret: %d", cpu, ret);
195 DPAA_BUS_LOG(DEBUG, "BMAN thread initialized");
197 /* Initialise qman thread portals */
198 ret = qman_thread_init();
200 DPAA_BUS_LOG(ERR, "qman_thread_init failed on "
201 "core %d with ret: %d", cpu, ret);
/* Roll back the BMAN portal that was initialized above. */
202 bman_thread_finish();
206 DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
208 dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
209 RTE_CACHE_LINE_SIZE);
210 if (!dpaa_io_portal) {
211 DPAA_BUS_LOG(ERR, "Unable to allocate memory");
/* Roll back both portals before bailing out. */
212 bman_thread_finish();
213 qman_thread_finish();
/* Record portal indices and the owning kernel thread id for teardown. */
217 dpaa_io_portal->qman_idx = qman_get_portal_index();
218 dpaa_io_portal->bman_idx = bman_get_portal_index();
219 dpaa_io_portal->tid = syscall(SYS_gettid);
/* Attach the record to this thread so dpaa_portal_finish() (the key's
 * destructor) runs automatically when the thread exits. */
221 ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
223 DPAA_BUS_LOG(ERR, "pthread_setspecific failed on "
224 "core %d with ret: %d", cpu, ret);
225 dpaa_portal_finish(NULL);
230 RTE_PER_LCORE(_dpaa_io) = true;
/* NOTE(review): duplicate of the DEBUG message logged earlier in this
 * function — possibly intended to read "DPAA portal initialized". */
232 DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
238 * rte_dpaa_portal_init - Wrapper over _dpaa_portal_init with thread level check
242 rte_dpaa_portal_init(void *arg)
/* Initialize at most once per thread: _dpaa_io is set on success,
 * so subsequent calls on the same lcore skip the portal setup. */
244 if (unlikely(!RTE_PER_LCORE(_dpaa_io)))
245 return _dpaa_portal_init(arg);
/* Tear down the calling thread's QMAN/BMAN portals and free the portal
 * record. Installed as the pthread key destructor (see pthread_key_create
 * in rte_dpaa_bus_scan); arg is the value stored via pthread_setspecific,
 * or NULL if cleanup already happened. */
251 dpaa_portal_finish(void *arg)
253 struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;
255 if (!dpaa_io_portal) {
256 DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
260 bman_thread_finish();
261 qman_thread_finish();
/* Clear the key so the destructor is not re-invoked with a stale pointer. */
263 pthread_setspecific(dpaa_portal_key, NULL);
265 rte_free(dpaa_io_portal);
266 dpaa_io_portal = NULL;
/* Allow a later re-init on this lcore. */
268 RTE_PER_LCORE(_dpaa_io) = false;
/* Sysfs paths whose presence indicates DPAA hardware; the layout differs
 * between kernel versions, so the bus scan probes both. */
271 #define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
272 #define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"
/* Bus scan callback: detect DPAA hardware via sysfs, load the device-tree
 * configuration, build the device list, and create the pthread key whose
 * destructor (dpaa_portal_finish) cleans up per-thread portals. */
275 rte_dpaa_bus_scan(void)
279 BUS_INIT_FUNC_TRACE();
/* No DPAA sysfs node under either known path: silently skip this bus. */
281 if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
282 (access(DPAA_DEV_PATH2, F_OK) != 0)) {
283 RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
287 /* Load the device-tree driver */
290 DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
294 /* Get the interface configurations from device-tree */
295 dpaa_netcfg = netcfg_acquire();
297 DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
301 RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");
303 if (!dpaa_netcfg->num_ethports) {
304 DPAA_BUS_LOG(INFO, "no network interfaces available");
305 /* This is not an error */
309 DPAA_BUS_LOG(DEBUG, "Bus: Address of netcfg=%p, Ethports=%d",
310 dpaa_netcfg, dpaa_netcfg->num_ethports);
312 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
313 dump_netcfg(dpaa_netcfg)
316 DPAA_BUS_LOG(DEBUG, "Number of devices = %d\n",
317 dpaa_netcfg->num_ethports);
318 ret = dpaa_create_device_list();
320 DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
324 /* create the key, supplying a function that'll be invoked
325 * when a portal affined thread will be deleted.
327 ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
/* Key creation failure: roll back the device list built above. */
329 DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
330 dpaa_clean_device_list();
334 DPAA_BUS_LOG(DEBUG, "dpaa_portal_key=%u, ret=%d\n",
335 (unsigned int)dpaa_portal_key, ret);
340 /* register a dpaa bus based dpaa driver */
/* Link the driver onto the bus's driver list and back-reference the bus
 * from the driver so unregister can find it. */
342 rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
346 BUS_INIT_FUNC_TRACE();
348 TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
349 /* Update Bus references */
350 driver->dpaa_bus = &rte_dpaa_bus;
353 /* un-register a dpaa bus based dpaa driver */
/* Inverse of rte_dpaa_driver_register: unlink the driver from the bus it
 * was registered on and clear its back-reference. */
355 rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
357 struct rte_dpaa_bus *dpaa_bus;
359 BUS_INIT_FUNC_TRACE();
361 dpaa_bus = driver->dpaa_bus;
363 TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
364 /* Update Bus references */
365 driver->dpaa_bus = NULL;
/* Match a driver against a device by comparing driver type with device
 * type; returns 0 on match (ret initialized non-zero outside this view). */
369 rte_dpaa_device_match(struct rte_dpaa_driver *drv,
370 struct rte_dpaa_device *dev)
374 BUS_INIT_FUNC_TRACE();
/* Guard against NULL driver or device (condition line not in view). */
377 DPAA_BUS_DEBUG("Invalid drv or dev received.");
381 if (drv->drv_type == dev->device_type) {
382 DPAA_BUS_INFO("Device: %s matches for driver: %s",
383 dev->name, drv->driver.name);
384 ret = 0; /* Found a match */
/* Bus probe callback: for every (device, driver) pair on the bus, invoke
 * the driver's probe() when rte_dpaa_device_match() reports a match. */
391 rte_dpaa_bus_probe(void)
394 struct rte_dpaa_device *dev;
395 struct rte_dpaa_driver *drv;
397 BUS_INIT_FUNC_TRACE();
399 /* For each registered driver, and device, call the driver->probe */
400 TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
401 TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
402 ret = rte_dpaa_device_match(drv, dev);
409 ret = drv->probe(drv, dev);
411 DPAA_BUS_ERR("Unable to probe.\n");
/* Bus find_device callback: iterate devices, skipping entries up to and
 * including 'start' (when given), and return the first device for which
 * cmp(&dev->device, data) == 0 (return statement outside this view). */
418 static struct rte_device *
419 rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
422 struct rte_dpaa_device *dev;
424 TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
425 if (start && &dev->device == start) {
426 start = NULL; /* starting point found */
430 if (cmp(&dev->device, data) == 0)
/* Definition of the DPAA bus singleton: wires the scan/probe/find callbacks
 * above into the generic bus interface and initializes both tailq heads. */
437 struct rte_dpaa_bus rte_dpaa_bus = {
439 .scan = rte_dpaa_bus_scan,
440 .probe = rte_dpaa_bus_probe,
441 .find_device = rte_dpaa_find_device,
443 .device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
444 .driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
/* Register the embedded generic bus object with EAL at load time. */
448 RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
/* Constructor: register the three DPAA log types (bus, mempool, pmd) and
 * default each to NOTICE level; a negative register return is tolerated
 * (the level is simply not set). */
450 RTE_INIT(dpaa_init_log);
454 dpaa_logtype_bus = rte_log_register("bus.dpaa");
455 if (dpaa_logtype_bus >= 0)
456 rte_log_set_level(dpaa_logtype_bus, RTE_LOG_NOTICE);
458 dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
459 if (dpaa_logtype_mempool >= 0)
460 rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
462 dpaa_logtype_pmd = rte_log_register("pmd.dpaa");
463 if (dpaa_logtype_pmd >= 0)
464 rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);