1 /* SPDX-License-Identifier: BSD-3-Clause
14 #include <sys/types.h>
15 #include <sys/syscall.h>
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_interrupts.h>
21 #include <rte_debug.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_tailq.h>
27 #include <rte_alarm.h>
28 #include <rte_ether.h>
29 #include <rte_ethdev_driver.h>
30 #include <rte_malloc.h>
33 #include <rte_mbuf_pool_ops.h>
35 #include <rte_dpaa_bus.h>
36 #include <rte_dpaa_logs.h>
/* Log types for the DPAA sub-components; registered in dpaa_init_log(). */
45 int dpaa_logtype_mempool;
47 int dpaa_logtype_eventdev;
/* The singleton bus object registered with EAL (initializer near EOF). */
49 struct rte_dpaa_bus rte_dpaa_bus;
/* Network configuration acquired from the device tree in bus scan. */
50 struct netcfg_info *dpaa_netcfg;
52 /* define a variable to hold the portal_key, once created.*/
53 pthread_key_t dpaa_portal_key;
/* SoC family (SVR masked); read from DPAA_SOC_ID_FILE during probe. */
55 unsigned int dpaa_svr_family;
/* Per-lcore flag: true once this thread's QMAN/BMAN portals are set up. */
57 RTE_DEFINE_PER_LCORE(bool, dpaa_io);
/* Per-lcore DQRR bookkeeping for held buffers. */
58 RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
/*
 * Ordering comparator for inserting devices into the bus device list:
 * first by device_type (keeps ETH and SEC devices grouped apart), then,
 * for ETH devices only, by FMAN id and finally by MAC id.
 * NOTE(review): the return statements and the declaration of 'comp' are
 * elided in this view; comments describe only the visible logic.
 */
61 compare_dpaa_devices(struct rte_dpaa_device *dev1,
62 		struct rte_dpaa_device *dev2)
66 	/* Segregating ETH from SEC devices */
67 	if (dev1->device_type > dev2->device_type)
69 	else if (dev1->device_type < dev2->device_type)
/* Non-ETH devices are not ordered beyond their type. */
74 	if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
77 	if (dev1->id.fman_id > dev2->id.fman_id) {
79 	} else if (dev1->id.fman_id < dev2->id.fman_id) {
82 		/* FMAN ids match, check for mac_id */
83 	if (dev1->id.mac_id > dev2->id.mac_id)
85 	else if (dev1->id.mac_id < dev2->id.mac_id)
/*
 * Insert 'newdev' into rte_dpaa_bus.device_list, keeping the list in the
 * order defined by compare_dpaa_devices(); falls back to a tail insert
 * when no insertion point is found (the 'inserted' flag tracks this).
 * NOTE(review): the comparison branch and the 'inserted' check are elided
 * in this view — TODO confirm insertion condition against full source.
 */
95 dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
97 	int comp, inserted = 0;
98 	struct rte_dpaa_device *dev = NULL;
99 	struct rte_dpaa_device *tdev = NULL;
101 	TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
102 		comp = compare_dpaa_devices(newdev, dev);
104 			TAILQ_INSERT_BEFORE(dev, newdev, next);
111 		TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
115 * Reads the SEC device from DTS
116 * Returns -1 if SEC devices not available, 0 otherwise
/*
 * Probe the device tree for a "fsl,sec-v4.0" compatible (CAAM) node.
 * Per the comment above: returns -1 when no SEC device is present,
 * 0 otherwise. NOTE(review): the loop body/returns are elided here.
 */
119 dpaa_sec_available(void)
121 	const struct device_node *caam_node;
123 	for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
130 static void dpaa_clean_device_list(void);
/*
 * Build the bus device list: one rte_dpaa_device per configured Ethernet
 * port (from dpaa_netcfg), plus a fixed number of SEC (crypto) devices
 * when a SEC node is present in the device tree. On any allocation
 * failure, jumps to cleanup which tears down whatever was created.
 * NOTE(review): allocation-failure branches, 'ret' handling and the
 * cleanup label are elided in this view.
 */
133 dpaa_create_device_list(void)
137 	struct rte_dpaa_device *dev;
138 	struct fm_eth_port_cfg *cfg;
139 	struct fman_if *fman_intf;
141 	/* Creating Ethernet Devices */
142 	for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
143 		dev = calloc(1, sizeof(struct rte_dpaa_device));
145 			DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
150 		cfg = &dpaa_netcfg->port_cfg[i];
151 		fman_intf = cfg->fman_if;
153 		/* Device identifiers */
/* fman_idx is 0-based in netcfg; exposed id is 1-based. */
154 		dev->id.fman_id = fman_intf->fman_idx + 1;
155 		dev->id.mac_id = fman_intf->mac_idx;
156 		dev->device_type = FSL_DPAA_ETH;
159 		/* Create device name, e.g. "fm1-mac3" */
160 		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
161 		sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
163 		DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
164 		dev->device.name = dev->name;
166 		dpaa_add_to_device_list(dev);
169 	rte_dpaa_bus.device_count = i;
171 	/* Unlike case of ETH, RTE_LIBRTE_DPAA_MAX_CRYPTODEV SEC devices are
172 	 * constantly created only if "sec" property is found in the device
173 	 * tree. Logically there is no limit for number of devices (QI
174 	 * interfaces) that can be created.
/* dpaa_sec_available() returns -1 (truthy) when SEC is absent. */
177 	if (dpaa_sec_available()) {
178 		DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
182 	/* Creating SEC Devices */
183 	for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
184 		dev = calloc(1, sizeof(struct rte_dpaa_device));
186 			DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
191 		dev->device_type = FSL_DPAA_CRYPTO;
/* SEC ids continue after the ETH device ids. */
192 		dev->id.dev_id = rte_dpaa_bus.device_count + i;
194 		/* Even though RTE_CRYPTODEV_NAME_MAX_LEN is valid length of
195 		 * crypto PMD, using RTE_ETH_NAME_MAX_LEN as that is the size
196 		 * allocated for dev->name/
198 		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
199 		sprintf(dev->name, "dpaa-sec%d", i);
200 		DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
202 		dpaa_add_to_device_list(dev);
205 	rte_dpaa_bus.device_count += i;
/* cleanup path: undo all devices created so far */
210 	dpaa_clean_device_list();
/*
 * Remove and release every device on the bus device list. The _SAFE
 * iterator allows removal during traversal. NOTE(review): the per-node
 * free() call is elided in this view — TODO confirm against full source.
 */
215 dpaa_clean_device_list(void)
217 	struct rte_dpaa_device *dev = NULL;
218 	struct rte_dpaa_device *tdev = NULL;
220 	TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
221 		TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
/*
 * Per-thread portal setup: pin the calling thread to a core, open BMAN
 * and QMAN software portals for it, and record the portal state in the
 * dpaa_portal_key TLS slot so dpaa_portal_finish() can clean it up.
 * arg == (void *)1, or an unaffinitized thread (LCORE_ID_ANY), is mapped
 * to the master lcore. Sets the per-lcore dpaa_io flag on success.
 * NOTE(review): declarations of ret/id/cpuset, the error-check 'if's and
 * return statements are elided in this view; only visible lines are
 * annotated. FIX: the error log on the qman_thread_init() failure path
 * said "bman_thread_init failed" — copy-paste from the bman path above;
 * corrected to name qman_thread_init.
 */
227 int rte_dpaa_portal_init(void *arg)
231 	uint32_t cpu = rte_lcore_id();
233 	struct dpaa_portal *dpaa_io_portal;
235 	BUS_INIT_FUNC_TRACE();
/* Non-EAL threads (or explicit request via arg==1) use the master lcore. */
237 	if ((size_t)arg == 1 || cpu == LCORE_ID_ANY)
238 		cpu = rte_get_master_lcore();
239 	/* if the core id is not supported */
241 	if (cpu >= RTE_MAX_LCORE)
244 	/* Set CPU affinity for this thread */
246 	CPU_SET(cpu, &cpuset);
248 	ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
250 		DPAA_BUS_LOG(ERR, "pthread_setaffinity_np failed on "
251 			"core :%d with ret: %d", cpu, ret);
255 	/* Initialise bman thread portals */
256 	ret = bman_thread_init();
258 		DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
259 			"core %d with ret: %d", cpu, ret);
263 	DPAA_BUS_LOG(DEBUG, "BMAN thread initialized");
265 	/* Initialise qman thread portals */
266 	ret = qman_thread_init();
268 		DPAA_BUS_LOG(ERR, "qman_thread_init failed on "
269 			"core %d with ret: %d", cpu, ret);
/* Unwind the bman portal opened above before bailing out. */
270 		bman_thread_finish();
274 	DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
276 	dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
277 				    RTE_CACHE_LINE_SIZE);
278 	if (!dpaa_io_portal) {
279 		DPAA_BUS_LOG(ERR, "Unable to allocate memory");
/* Unwind both portals on allocation failure. */
280 		bman_thread_finish();
281 		qman_thread_finish();
285 	dpaa_io_portal->qman_idx = qman_get_portal_index();
286 	dpaa_io_portal->bman_idx = bman_get_portal_index();
287 	dpaa_io_portal->tid = syscall(SYS_gettid);
/* Stash portal state in TLS; dpaa_portal_finish() is the key destructor. */
289 	ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
291 		DPAA_BUS_LOG(ERR, "pthread_setspecific failed on "
292 			"core %d with ret: %d", cpu, ret);
293 		dpaa_portal_finish(NULL);
298 	RTE_PER_LCORE(dpaa_io) = true;
300 	DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
/*
 * Ensure the calling thread has a DPAA portal (lazily initializing it),
 * then create a dedicated QMAN portal for the given frame queue and
 * subscribe it to the FQ's pool channel via a static dequeue command.
 * NOTE(review): declarations of ret/sdqcr, NULL checks and returns are
 * elided in this view.
 */
306 rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
308 	/* Affine above created portal with channel*/
310 	struct qman_portal *qp;
/* Lazy per-thread portal init on first use. */
313 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
314 		ret = rte_dpaa_portal_init(arg);
316 			DPAA_BUS_LOG(ERR, "portal initialization failure");
321 	/* Initialise qman specific portals */
322 	qp = fsl_qman_portal_create();
324 		DPAA_BUS_LOG(ERR, "Unable to alloc fq portal");
/* Subscribe the portal to the FQ's pool channel. */
328 	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
329 	qman_static_dequeue_add(sdqcr, qp);
/* Destroy the QMAN portal previously created for this frame queue. */
334 int rte_dpaa_portal_fq_close(struct qman_fq *fq)
336 	return fsl_qman_portal_destroy(fq->qp);
/*
 * TLS destructor (registered via pthread_key_create) and explicit
 * teardown for a thread's DPAA portal: closes BMAN/QMAN portals, clears
 * the TLS slot, frees the portal struct and resets the per-lcore flag.
 * A NULL arg means the portal was already cleaned — harmless no-op.
 */
340 dpaa_portal_finish(void *arg)
342 	struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;
344 	if (!dpaa_io_portal) {
345 		DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
349 	bman_thread_finish();
350 	qman_thread_finish();
/* Clear the key so the destructor is not re-invoked for this thread. */
352 	pthread_setspecific(dpaa_portal_key, NULL);
354 	rte_free(dpaa_io_portal);
/* Local-only assignment; defensive, has no effect outside this frame. */
355 	dpaa_io_portal = NULL;
357 	RTE_PER_LCORE(dpaa_io) = false;
/* sysfs paths whose presence indicates a DPAA-capable platform; both
 * variants are checked because the node name differs across kernels.
 */
360 #define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
361 #define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"
/*
 * Bus scan callback: detect the DPAA platform via sysfs, initialize the
 * device-tree layer, acquire the network configuration, build the bus
 * device list, and create the TLS key used for per-thread portals.
 * Absence of the platform or of network interfaces is not an error.
 * NOTE(review): 'ret' declaration, of_init() call, error returns and
 * closing braces are elided in this view.
 */
364 rte_dpaa_bus_scan(void)
368 	BUS_INIT_FUNC_TRACE();
/* Not a DPAA platform — skip silently. */
370 	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
371 		(access(DPAA_DEV_PATH2, F_OK) != 0)) {
372 		RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
376 	/* Load the device-tree driver */
379 		DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
383 	/* Get the interface configurations from device-tree */
384 	dpaa_netcfg = netcfg_acquire();
386 		DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
390 	RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");
392 	if (!dpaa_netcfg->num_ethports) {
393 		DPAA_BUS_LOG(INFO, "no network interfaces available");
394 		/* This is not an error */
398 	DPAA_BUS_LOG(DEBUG, "Bus: Address of netcfg=%p, Ethports=%d",
399 		dpaa_netcfg, dpaa_netcfg->num_ethports);
401 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
402 	dump_netcfg(dpaa_netcfg);
405 	DPAA_BUS_LOG(DEBUG, "Number of devices = %d\n",
406 		dpaa_netcfg->num_ethports);
407 	ret = dpaa_create_device_list();
409 		DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
413 	/* create the key, supplying a function that'll be invoked
414 	 * when a portal affined thread will be deleted.
416 	ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
418 		DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
/* Key creation failed — tear down the device list built above. */
419 		dpaa_clean_device_list();
423 	DPAA_BUS_LOG(DEBUG, "dpaa_portal_key=%u, ret=%d\n",
424 		(unsigned int)dpaa_portal_key, ret);
429 /* register a dpaa bus based dpaa driver */
/*
 * Add a DPAA PMD driver to the bus driver list and back-reference the
 * bus from the driver. Called from driver constructors at startup.
 */
431 rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
435 	BUS_INIT_FUNC_TRACE();
437 	TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
438 	/* Update Bus references */
439 	driver->dpaa_bus = &rte_dpaa_bus;
442 /* un-register a dpaa bus based dpaa driver */
/*
 * Remove a previously registered driver from its bus's driver list and
 * clear the driver's bus back-reference.
 */
444 rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
446 	struct rte_dpaa_bus *dpaa_bus;
448 	BUS_INIT_FUNC_TRACE();
450 	dpaa_bus = driver->dpaa_bus;
452 	TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
453 	/* Update Bus references */
454 	driver->dpaa_bus = NULL;
/*
 * Match a driver against a device purely by device_type. Returns 0 on a
 * match; NOTE(review): the non-match return value and the NULL-argument
 * check preceding the "Invalid drv or dev" log are elided in this view.
 */
458 rte_dpaa_device_match(struct rte_dpaa_driver *drv,
459 		      struct rte_dpaa_device *dev)
463 	BUS_INIT_FUNC_TRACE();
466 		DPAA_BUS_DEBUG("Invalid drv or dev received.");
470 	if (drv->drv_type == dev->device_type) {
471 		DPAA_BUS_INFO("Device: %s matches for driver: %s",
472 			dev->name, drv->driver.name);
473 		ret = 0; /* Found a match */
/*
 * Bus probe callback: for every (device, driver) pair whose types match,
 * invoke the driver's probe hook; then set the platform mempool ops if
 * any DPAA device exists, and read the SoC SVR to set dpaa_svr_family.
 * NOTE(review): the match-result check before probe, the svr_file NULL
 * check, and any fclose(svr_file) are elided in this view — TODO confirm
 * the file handle is closed in the full source.
 */
480 rte_dpaa_bus_probe(void)
483 	struct rte_dpaa_device *dev;
484 	struct rte_dpaa_driver *drv;
485 	FILE *svr_file = NULL;
486 	unsigned int svr_ver;
488 	BUS_INIT_FUNC_TRACE();
490 	/* For each registered driver, and device, call the driver->probe */
491 	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
492 		TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
493 			ret = rte_dpaa_device_match(drv, dev);
500 			ret = drv->probe(drv, dev);
502 				DPAA_BUS_ERR("Unable to probe.\n");
508 	/* Register DPAA mempool ops only if any DPAA device has
511 	if (!TAILQ_EMPTY(&rte_dpaa_bus.device_list))
512 		rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);
/* Detect the SoC family from sysfs for runtime feature decisions. */
514 	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
516 		if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
517 			dpaa_svr_family = svr_ver & SVR_MASK;
/*
 * Bus find_device callback: iterate the device list, optionally resuming
 * after 'start', returning the first device for which cmp(dev, data)
 * reports equality. NOTE(review): the returns and the 'continue' after
 * finding the starting point are elided in this view.
 */
524 static struct rte_device *
525 rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
528 	struct rte_dpaa_device *dev;
530 	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
531 		if (start && &dev->device == start) {
532 			start = NULL;  /* starting point found */
536 		if (cmp(&dev->device, data) == 0)
544 * Get iommu class of DPAA2 devices on the bus.
/*
 * Bus get_iommu_class callback. When the DPAA sysfs nodes are absent the
 * platform imposes no constraint; NOTE(review): both return values are
 * elided in this view (DPAA typically requires physical addressing).
 */
546 static enum rte_iova_mode
547 rte_dpaa_get_iommu_class(void)
549 	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
550 		(access(DPAA_DEV_PATH2, F_OK) != 0)) {
/*
 * The DPAA bus singleton: EAL bus callbacks plus empty device/driver
 * lists (populated by scan and by driver-constructor registration).
 */
556 struct rte_dpaa_bus rte_dpaa_bus = {
558 	.scan = rte_dpaa_bus_scan,
559 	.probe = rte_dpaa_bus_probe,
560 	.find_device = rte_dpaa_find_device,
561 	.get_iommu_class = rte_dpaa_get_iommu_class,
563 	.device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
564 	.driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
568 RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
570 RTE_INIT(dpaa_init_log);
/* Constructor body (see RTE_INIT above): register one log type per DPAA
 * sub-component and default each to NOTICE level. Negative register
 * results are silently skipped — the logtype stays unusable but init
 * proceeds.
 */
574 	dpaa_logtype_bus = rte_log_register("bus.dpaa");
575 	if (dpaa_logtype_bus >= 0)
576 		rte_log_set_level(dpaa_logtype_bus, RTE_LOG_NOTICE);
578 	dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
579 	if (dpaa_logtype_mempool >= 0)
580 		rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
582 	dpaa_logtype_pmd = rte_log_register("pmd.dpaa");
583 	if (dpaa_logtype_pmd >= 0)
584 		rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);
586 	dpaa_logtype_eventdev = rte_log_register("eventdev.dpaa");
587 	if (dpaa_logtype_eventdev >= 0)
588 		rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE);