/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_bus.h>
#include <rte_mbuf_pool_ops.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <of.h>
#include <netcfg.h>

int dpaa_logtype_bus;
int dpaa_logtype_mempool;
int dpaa_logtype_pmd;
int dpaa_logtype_eventdev;

struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;

/* Define a variable to hold the portal_key, once created. */
static pthread_key_t dpaa_portal_key;

unsigned int dpaa_svr_family;

#define FSL_DPAA_BUS_NAME       dpaa_bus

RTE_DEFINE_PER_LCORE(bool, dpaa_io);
RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);

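/* Compare two DPAA devices for sorted insertion: order primarily by
 * device type; for Ethernet devices, the FMAN id and then the MAC id
 * are used as tie breakers.
 */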
static int
compare_dpaa_devices(struct rte_dpaa_device *dev1,
                     struct rte_dpaa_device *dev2)
{
        int comp = 0;

        /* Segregating ETH from SEC devices */
        if (dev1->device_type > dev2->device_type)
                comp = 1;
        else if (dev1->device_type < dev2->device_type)
                comp = -1;
        else
                comp = 0;

        if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
                return comp;

        if (dev1->id.fman_id > dev2->id.fman_id) {
                comp = 1;
        } else if (dev1->id.fman_id < dev2->id.fman_id) {
                comp = -1;
        } else {
                /* FMAN ids match, check for mac_id */
                if (dev1->id.mac_id > dev2->id.mac_id)
                        comp = 1;
                else if (dev1->id.mac_id < dev2->id.mac_id)
                        comp = -1;
                else
                        comp = 0;
        }

        return comp;
}

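/* Insert a newly scanned device into the bus device list, keeping the
 * list sorted according to compare_dpaa_devices().
 */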
static inline void
dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
{
        int comp, inserted = 0;
        struct rte_dpaa_device *dev = NULL;
        struct rte_dpaa_device *tdev = NULL;

        TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
                comp = compare_dpaa_devices(newdev, dev);
                if (comp < 0) {
                        TAILQ_INSERT_BEFORE(dev, newdev, next);
                        inserted = 1;
                        break;
                }
        }

        if (!inserted)
                TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
}

/*
 * Checks for a SEC device in the DTS.
 * Returns -1 if SEC devices are not available, 0 otherwise.
 */
static inline int
dpaa_sec_available(void)
{
        const struct device_node *caam_node;

        for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
                return 0;
        }

        return -1;
}

static void dpaa_clean_device_list(void);

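/* Return the rte_devargs entry, if any, whose parsed name matches the
 * given DPAA device; NULL when no matching devargs were supplied.
 */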
static struct rte_devargs *
dpaa_devargs_lookup(struct rte_dpaa_device *dev)
{
        struct rte_devargs *devargs;
        char dev_name[32];

        RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
                devargs->bus->parse(devargs->name, &dev_name);
                if (strcmp(dev_name, dev->device.name) == 0) {
                        DPAA_BUS_INFO("**Devargs matched %s", dev_name);
                        return devargs;
                }
        }
        return NULL;
}

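/* Build the bus device list: one rte_dpaa_device per FMAN Ethernet port
 * reported by the network configuration and, when the SEC block is
 * present in the device tree, one per crypto device.
 */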
static int
dpaa_create_device_list(void)
{
        int i;
        int ret;
        struct rte_dpaa_device *dev;
        struct fm_eth_port_cfg *cfg;
        struct fman_if *fman_intf;

        /* Creating Ethernet Devices */
        for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
                dev = calloc(1, sizeof(struct rte_dpaa_device));
                if (!dev) {
                        DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
                        ret = -ENOMEM;
                        goto cleanup;
                }

                dev->device.bus = &rte_dpaa_bus.bus;

                cfg = &dpaa_netcfg->port_cfg[i];
                fman_intf = cfg->fman_if;

                /* Device identifiers */
                dev->id.fman_id = fman_intf->fman_idx + 1;
                dev->id.mac_id = fman_intf->mac_idx;
                dev->device_type = FSL_DPAA_ETH;
                dev->id.dev_id = i;

                /* Create device name */
                memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
                sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
                        fman_intf->mac_idx);
                DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
                dev->device.name = dev->name;
                dev->device.devargs = dpaa_devargs_lookup(dev);

                dpaa_add_to_device_list(dev);
        }

        rte_dpaa_bus.device_count = i;

        /* Unlike the ETH case, a fixed number (RTE_LIBRTE_DPAA_MAX_CRYPTODEV)
         * of SEC devices is created, and only if the "sec" property is found
         * in the device tree. Logically there is no limit to the number of
         * devices (QI interfaces) that can be created.
         */
        if (dpaa_sec_available()) {
                DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
                return 0;
        }

        /* Creating SEC Devices */
        for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
                dev = calloc(1, sizeof(struct rte_dpaa_device));
                if (!dev) {
                        DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
                        ret = -1;
                        goto cleanup;
                }

                dev->device_type = FSL_DPAA_CRYPTO;
                dev->id.dev_id = rte_dpaa_bus.device_count + i;

                /* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid length
                 * for a crypto PMD name, use RTE_ETH_NAME_MAX_LEN as that is
                 * the size allocated for dev->name.
                 */
                memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
                sprintf(dev->name, "dpaa-sec%d", i);
                DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
                dev->device.name = dev->name;
                dev->device.devargs = dpaa_devargs_lookup(dev);

                dpaa_add_to_device_list(dev);
        }

        rte_dpaa_bus.device_count += i;

        return 0;

cleanup:
        dpaa_clean_device_list();
        return ret;
}

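/* Remove and free every device on the bus device list (used when the scan
 * has to be rolled back).
 */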
static void
dpaa_clean_device_list(void)
{
        struct rte_dpaa_device *dev = NULL;
        struct rte_dpaa_device *tdev = NULL;

        TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
                TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
                free(dev);
                dev = NULL;
        }
}

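/* Per-thread portal initialization: pin the calling thread to an lcore
 * (or to the master lcore), open BMAN and QMAN software portals for it
 * and record the portal state under the thread-specific dpaa_portal_key.
 */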
int rte_dpaa_portal_init(void *arg)
{
        cpu_set_t cpuset;
        pthread_t id;
        uint32_t cpu = rte_lcore_id();
        int ret;
        struct dpaa_portal *dpaa_io_portal;

        BUS_INIT_FUNC_TRACE();

        if ((size_t)arg == 1 || cpu == LCORE_ID_ANY)
                cpu = rte_get_master_lcore();
        /* if the core id is not supported */
        else
                if (cpu >= RTE_MAX_LCORE)
                        return -1;

        /* Set CPU affinity for this thread */
        CPU_ZERO(&cpuset);
        CPU_SET(cpu, &cpuset);
        id = pthread_self();
        ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
        if (ret) {
                DPAA_BUS_LOG(ERR, "pthread_setaffinity_np failed on "
                        "core %d with ret: %d", cpu, ret);
                return ret;
        }

        /* Initialise bman thread portals */
        ret = bman_thread_init();
        if (ret) {
                DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
                        "core %d with ret: %d", cpu, ret);
                return ret;
        }

        DPAA_BUS_LOG(DEBUG, "BMAN thread initialized");

        /* Initialise qman thread portals */
        ret = qman_thread_init();
        if (ret) {
                DPAA_BUS_LOG(ERR, "qman_thread_init failed on "
                        "core %d with ret: %d", cpu, ret);
                bman_thread_finish();
                return ret;
        }

        DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");

        dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
                                    RTE_CACHE_LINE_SIZE);
        if (!dpaa_io_portal) {
                DPAA_BUS_LOG(ERR, "Unable to allocate memory");
                bman_thread_finish();
                qman_thread_finish();
                return -ENOMEM;
        }

        dpaa_io_portal->qman_idx = qman_get_portal_index();
        dpaa_io_portal->bman_idx = bman_get_portal_index();
        dpaa_io_portal->tid = syscall(SYS_gettid);

        ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
        if (ret) {
                DPAA_BUS_LOG(ERR, "pthread_setspecific failed on "
                            "core %d with ret: %d", cpu, ret);
                dpaa_portal_finish(NULL);

                return ret;
        }

        RTE_PER_LCORE(dpaa_io) = true;

        DPAA_BUS_LOG(DEBUG, "DPAA portal initialized");

        return 0;
}

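/* Create a dedicated QMAN portal for the given frame queue and subscribe
 * it to the queue's pool channel; the per-thread portal is initialized
 * first if this thread has not done so yet.
 */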
int
rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
{
        /* Affine the above created portal with the channel */
        u32 sdqcr;
        struct qman_portal *qp;
        int ret;

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init(arg);
                if (ret < 0) {
                        DPAA_BUS_LOG(ERR, "portal initialization failure");
                        return ret;
                }
        }

        /* Initialise qman specific portals */
        qp = fsl_qman_portal_create();
        if (!qp) {
                DPAA_BUS_LOG(ERR, "Unable to alloc fq portal");
                return -1;
        }
        fq->qp = qp;
        sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
        qman_static_dequeue_add(sdqcr, qp);

        return 0;
}

int rte_dpaa_portal_fq_close(struct qman_fq *fq)
{
        return fsl_qman_portal_destroy(fq->qp);
}

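/* Destructor registered with dpaa_portal_key: release the BMAN and QMAN
 * thread portals and free the per-thread portal structure.
 */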
void
dpaa_portal_finish(void *arg)
{
        struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;

        if (!dpaa_io_portal) {
                DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
                return;
        }

        bman_thread_finish();
        qman_thread_finish();

        pthread_setspecific(dpaa_portal_key, NULL);

        rte_free(dpaa_io_portal);
        dpaa_io_portal = NULL;

        RTE_PER_LCORE(dpaa_io) = false;
}

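/* Bus parse callback: accept device names of the form
 * "dpaa_bus:fm<X>-mac<Y>" or "dpaa_bus:dpaa-sec<N>" and, when out_name
 * is provided, copy the part after the colon into it.
 */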
static int
rte_dpaa_bus_parse(const char *name, void *out_name)
{
        int i, j;
        int max_fman = 2, max_macs = 16;
        char *sep = strchr(name, ':');

        if (strncmp(name, RTE_STR(FSL_DPAA_BUS_NAME),
                strlen(RTE_STR(FSL_DPAA_BUS_NAME)))) {
                return -EINVAL;
        }

        if (!sep) {
                DPAA_BUS_ERR("Incorrect device name observed");
                return -EINVAL;
        }

        sep = (char *)(sep + 1);

        for (i = 0; i < max_fman; i++) {
                for (j = 0; j < max_macs; j++) {
                        char fm_name[16];

                        snprintf(fm_name, 16, "fm%d-mac%d", i, j);
                        if (strcmp(fm_name, sep) == 0) {
                                if (out_name)
                                        strcpy(out_name, sep);
                                return 0;
                        }
                }
        }

        for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
                char sec_name[16];

                snprintf(sec_name, 16, "dpaa-sec%d", i);
                if (strcmp(sec_name, sep) == 0) {
                        if (out_name)
                                strcpy(out_name, sep);
                        return 0;
                }
        }

        return -EINVAL;
}

#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"

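/* Bus scan callback: detect the DPAA platform through sysfs, initialize
 * the device-tree layer, acquire the network configuration and build the
 * device list; also creates the pthread key used for per-thread portals.
 */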
static int
rte_dpaa_bus_scan(void)
{
        int ret;

        BUS_INIT_FUNC_TRACE();

        if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
            (access(DPAA_DEV_PATH2, F_OK) != 0)) {
                RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
                return 0;
        }

        /* Load the device-tree driver */
        ret = of_init();
        if (ret) {
                DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
                return -1;
        }

        /* Get the interface configurations from device-tree */
        dpaa_netcfg = netcfg_acquire();
        if (!dpaa_netcfg) {
                DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
                return -EINVAL;
        }

        RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");

        if (!dpaa_netcfg->num_ethports) {
                DPAA_BUS_LOG(INFO, "no network interfaces available");
                /* This is not an error */
                return 0;
        }

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
        dump_netcfg(dpaa_netcfg);
#endif

        DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d",
                     dpaa_netcfg->num_ethports);
        ret = dpaa_create_device_list();
        if (ret) {
                DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
                return ret;
        }

        /* Create the pthread key, supplying the function that is invoked
         * when a portal-affined thread is deleted.
         */
        ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
        if (ret) {
                DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
                dpaa_clean_device_list();
                return ret;
        }

        return 0;
}

/* Register a DPAA bus based driver */
void
rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
{
        RTE_VERIFY(driver);

        BUS_INIT_FUNC_TRACE();

        TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
        /* Update Bus references */
        driver->dpaa_bus = &rte_dpaa_bus;
}

/* Unregister a DPAA bus based driver */
void
rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
{
        struct rte_dpaa_bus *dpaa_bus;

        BUS_INIT_FUNC_TRACE();

        dpaa_bus = driver->dpaa_bus;

        TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
        /* Update Bus references */
        driver->dpaa_bus = NULL;
}

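/* Return 0 when the driver type matches the device type, -1 otherwise. */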
static int
rte_dpaa_device_match(struct rte_dpaa_driver *drv,
                      struct rte_dpaa_device *dev)
{
        if (!drv || !dev) {
                DPAA_BUS_DEBUG("Invalid drv or dev received.");
                return -1;
        }

        if (drv->drv_type == dev->device_type)
                return 0;

        return -1;
}

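/* Bus probe callback: read the SoC version exposed through
 * DPAA_SOC_ID_FILE, then, for every matching device/driver pair, invoke
 * the driver's probe while honouring whitelist/blacklist devargs policies.
 */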
static int
rte_dpaa_bus_probe(void)
{
        int ret = -1;
        struct rte_dpaa_device *dev;
        struct rte_dpaa_driver *drv;
        FILE *svr_file = NULL;
        unsigned int svr_ver;
        int probe_all = rte_dpaa_bus.bus.conf.scan_mode !=
                        RTE_BUS_SCAN_WHITELIST;

        svr_file = fopen(DPAA_SOC_ID_FILE, "r");
        if (svr_file) {
                if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
                        dpaa_svr_family = svr_ver & SVR_MASK;
                fclose(svr_file);
        }

        /* For each registered driver and device, call the driver->probe */
        TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
                TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
                        ret = rte_dpaa_device_match(drv, dev);
                        if (ret)
                                continue;

                        if (!drv->probe ||
                            (dev->device.devargs &&
                            dev->device.devargs->policy == RTE_DEV_BLACKLISTED))
                                continue;

                        if (probe_all ||
                            (dev->device.devargs &&
                            dev->device.devargs->policy ==
                            RTE_DEV_WHITELISTED)) {
                                ret = drv->probe(drv, dev);
                                if (ret) {
                                        DPAA_BUS_ERR("Unable to probe.");
                                } else {
                                        dev->driver = drv;
                                        dev->device.driver = &drv->driver;
                                }
                        }
                        break;
                }
        }

        /* Register DPAA mempool ops only if any DPAA device has
         * been detected.
         */
        if (!TAILQ_EMPTY(&rte_dpaa_bus.device_list))
                rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);

        return 0;
}

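/* Bus find_device callback: walk the device list, optionally resuming
 * just after 'start', and return the first device for which cmp()
 * reports a match.
 */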
static struct rte_device *
rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
                     const void *data)
{
        struct rte_dpaa_device *dev;

        TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
                if (start && &dev->device == start) {
                        start = NULL;  /* starting point found */
                        continue;
                }

                if (cmp(&dev->device, data) == 0)
                        return &dev->device;
        }

        return NULL;
}

/*
 * Get the IOMMU class of DPAA devices on the bus.
 */
static enum rte_iova_mode
rte_dpaa_get_iommu_class(void)
{
        if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
            (access(DPAA_DEV_PATH2, F_OK) != 0)) {
                return RTE_IOVA_DC;
        }
        return RTE_IOVA_PA;
}

struct rte_dpaa_bus rte_dpaa_bus = {
        .bus = {
                .scan = rte_dpaa_bus_scan,
                .probe = rte_dpaa_bus_probe,
                .parse = rte_dpaa_bus_parse,
                .find_device = rte_dpaa_find_device,
                .get_iommu_class = rte_dpaa_get_iommu_class,
        },
        .device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
        .driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
        .device_count = 0,
};

RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);

RTE_INIT(dpaa_init_log)
{
        dpaa_logtype_bus = rte_log_register("bus.dpaa");
        if (dpaa_logtype_bus >= 0)
                rte_log_set_level(dpaa_logtype_bus, RTE_LOG_NOTICE);

        dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
        if (dpaa_logtype_mempool >= 0)
                rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);

        dpaa_logtype_pmd = rte_log_register("pmd.net.dpaa");
        if (dpaa_logtype_pmd >= 0)
                rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);

        dpaa_logtype_eventdev = rte_log_register("pmd.event.dpaa");
        if (dpaa_logtype_eventdev >= 0)
                rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE);
}