bus/dpaa: fix clang warnings
drivers/bus/dpaa/dpaa_bus.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright 2017 NXP
4  *
5  */
6 /* System headers */
7 #include <stdio.h>
8 #include <inttypes.h>
9 #include <unistd.h>
10 #include <limits.h>
11 #include <sched.h>
12 #include <signal.h>
13 #include <pthread.h>
14 #include <sys/types.h>
15 #include <sys/syscall.h>
16
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_interrupts.h>
20 #include <rte_log.h>
21 #include <rte_debug.h>
22 #include <rte_pci.h>
23 #include <rte_atomic.h>
24 #include <rte_branch_prediction.h>
25 #include <rte_memory.h>
26 #include <rte_tailq.h>
27 #include <rte_eal.h>
28 #include <rte_alarm.h>
29 #include <rte_ether.h>
30 #include <rte_ethdev_driver.h>
31 #include <rte_malloc.h>
32 #include <rte_ring.h>
33 #include <rte_bus.h>
34 #include <rte_mbuf_pool_ops.h>
35
36 #include <rte_dpaa_bus.h>
37 #include <rte_dpaa_logs.h>
38
39 #include <fsl_usd.h>
40 #include <fsl_qman.h>
41 #include <fsl_bman.h>
42 #include <of.h>
43 #include <netcfg.h>
44
45 int dpaa_logtype_bus;
46 int dpaa_logtype_mempool;
47 int dpaa_logtype_pmd;
48 int dpaa_logtype_eventdev;
49
50 struct rte_dpaa_bus rte_dpaa_bus;
51 struct netcfg_info *dpaa_netcfg;
52
53 /* Define a variable to hold the portal_key, once created. */
54 pthread_key_t dpaa_portal_key;
55
56 unsigned int dpaa_svr_family;
57
58 RTE_DEFINE_PER_LCORE(bool, _dpaa_io);
59 RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
60
61 static inline void
62 dpaa_add_to_device_list(struct rte_dpaa_device *dev)
63 {
64         TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, dev, next);
65 }
66
67 /*
68  * Checks the device tree (DTS) for a SEC device node.
69  * Returns 0 if a SEC device is available, -1 otherwise.
70  */
71 static inline int
72 dpaa_sec_available(void)
73 {
74         const struct device_node *caam_node;
75
76         for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
77                 return 0;
78         }
79
80         return -1;
81 }
82
83 static void dpaa_clean_device_list(void);
84
85 static int
86 dpaa_create_device_list(void)
87 {
88         int i;
89         int ret;
90         struct rte_dpaa_device *dev;
91         struct fm_eth_port_cfg *cfg;
92         struct fman_if *fman_intf;
93
94         /* Creating Ethernet Devices */
95         for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
96                 dev = calloc(1, sizeof(struct rte_dpaa_device));
97                 if (!dev) {
98                         DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
99                         ret = -ENOMEM;
100                         goto cleanup;
101                 }
102
103                 cfg = &dpaa_netcfg->port_cfg[i];
104                 fman_intf = cfg->fman_if;
105
106                 /* Device identifiers */
107                 dev->id.fman_id = fman_intf->fman_idx + 1;
108                 dev->id.mac_id = fman_intf->mac_idx;
109                 dev->device_type = FSL_DPAA_ETH;
110                 dev->id.dev_id = i;
111
112                 /* Create device name */
113                 memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
114                 snprintf(dev->name, RTE_ETH_NAME_MAX_LEN, "fm%d-mac%d",
115                         (fman_intf->fman_idx + 1), fman_intf->mac_idx);
116                 DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
117                 dev->device.name = dev->name;
118
119                 dpaa_add_to_device_list(dev);
120         }
121
122         rte_dpaa_bus.device_count = i;
123
124         /* Unlike the ETH case, RTE_LIBRTE_DPAA_MAX_CRYPTODEV SEC devices
125          * are created only if the "sec" property is found in the device
126          * tree. Logically there is no limit on the number of devices (QI
127          * interfaces) that can be created.
128          */
129
130         if (dpaa_sec_available()) {
131                 DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
132                 return 0;
133         }
134
135         /* Creating SEC Devices */
136         for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
137                 dev = calloc(1, sizeof(struct rte_dpaa_device));
138                 if (!dev) {
139                         DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
140                         ret = -1;
141                         goto cleanup;
142                 }
143
144                 dev->device_type = FSL_DPAA_CRYPTO;
145                 dev->id.dev_id = rte_dpaa_bus.device_count + i;
146
147                 /* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid
148                  * length of a crypto PMD name, RTE_ETH_NAME_MAX_LEN is used
149                  * as that is the size allocated for dev->name.
150                  */
151                 memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
152                 snprintf(dev->name, RTE_ETH_NAME_MAX_LEN, "dpaa-sec%d", i);
153                 DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
154
155                 dpaa_add_to_device_list(dev);
156         }
157
158         rte_dpaa_bus.device_count += i;
159
160         return 0;
161
162 cleanup:
163         dpaa_clean_device_list();
164         return ret;
165 }
166
167 static void
168 dpaa_clean_device_list(void)
169 {
170         struct rte_dpaa_device *dev = NULL;
171         struct rte_dpaa_device *tdev = NULL;
172
173         TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
174                 TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
175                 free(dev);
176                 dev = NULL;
177         }
178 }
179
180 /** XXX move this function into a separate file */
181 static int
182 _dpaa_portal_init(void *arg)
183 {
184         cpu_set_t cpuset;
185         pthread_t id;
186         uint32_t cpu = rte_lcore_id();
187         int ret;
188         struct dpaa_portal *dpaa_io_portal;
189
190         BUS_INIT_FUNC_TRACE();
191
192         if ((uint64_t)arg == 1 || cpu == LCORE_ID_ANY) {
193                 cpu = rte_get_master_lcore();
194         } else if (cpu >= RTE_MAX_LCORE) {
195                 /* the requested core id is not supported */
196                 return -1;
197         }
198
199         /* Set CPU affinity for this thread */
200         CPU_ZERO(&cpuset);
201         CPU_SET(cpu, &cpuset);
202         id = pthread_self();
203         ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
204         if (ret) {
205                 DPAA_BUS_LOG(ERR, "pthread_setaffinity_np failed on "
206                         "core %d with ret: %d", cpu, ret);
207                 return ret;
208         }
209
210         /* Initialise bman thread portals */
211         ret = bman_thread_init();
212         if (ret) {
213                 DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
214                         "core %d with ret: %d", cpu, ret);
215                 return ret;
216         }
217
218         DPAA_BUS_LOG(DEBUG, "BMAN thread initialized");
219
220         /* Initialise qman thread portals */
221         ret = qman_thread_init();
222         if (ret) {
223                 DPAA_BUS_LOG(ERR, "qman_thread_init failed on "
224                         "core %d with ret: %d", cpu, ret);
225                 bman_thread_finish();
226                 return ret;
227         }
228
229         DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
230
231         dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
232                                     RTE_CACHE_LINE_SIZE);
233         if (!dpaa_io_portal) {
234                 DPAA_BUS_LOG(ERR, "Unable to allocate memory");
235                 bman_thread_finish();
236                 qman_thread_finish();
237                 return -ENOMEM;
238         }
239
240         dpaa_io_portal->qman_idx = qman_get_portal_index();
241         dpaa_io_portal->bman_idx = bman_get_portal_index();
242         dpaa_io_portal->tid = syscall(SYS_gettid);
243
244         ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
245         if (ret) {
246                 DPAA_BUS_LOG(ERR, "pthread_setspecific failed on "
247                             "core %d with ret: %d", cpu, ret);
248                 dpaa_portal_finish((void *)dpaa_io_portal);
249
250                 return ret;
251         }
252
253         RTE_PER_LCORE(_dpaa_io) = true;
254
255         DPAA_BUS_LOG(DEBUG, "DPAA portal initialized");
256
257         return 0;
258 }
259
260 /*
261  * rte_dpaa_portal_init - Wrapper over _dpaa_portal_init with thread level check
262  * XXX Complete this
263  */
264 int rte_dpaa_portal_init(void *arg)
265 {
266         if (unlikely(!RTE_PER_LCORE(_dpaa_io)))
267                 return _dpaa_portal_init(arg);
268
269         return 0;
270 }
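
For context, a minimal usage sketch (the worker function and launch flow below are illustrative, not part of this file): every lcore that touches QMAN/BMAN is expected to attach itself to a portal through rte_dpaa_portal_init() first.

#include <stdio.h>
#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_dpaa_bus.h>

/* Illustrative per-lcore worker; assumes the DPAA bus has already been
 * scanned and probed by the EAL.
 */
static int
dpaa_worker(void *arg __rte_unused)
{
	/* Lazily binds this lcore to a QMAN/BMAN portal pair. */
	if (rte_dpaa_portal_init(NULL) != 0) {
		printf("portal init failed on lcore %u\n", rte_lcore_id());
		return -1;
	}
	/* ... rx/tx burst calls may now run on this lcore ... */
	return 0;
}

Launching this with rte_eal_remote_launch(dpaa_worker, NULL, lcore_id) gives each worker its own portal; passing (void *)1 instead of NULL forces affinity to the master lcore, as handled in _dpaa_portal_init() above.
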
271
272 int
273 rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
274 {
275         /* Affine the portal created above with the channel */
276         u32 sdqcr;
277         struct qman_portal *qp;
278
279         if (unlikely(!RTE_PER_LCORE(_dpaa_io)))
280                 _dpaa_portal_init(arg);
281
282         /* Initialise qman specific portals */
283         qp = fsl_qman_portal_create();
284         if (!qp) {
285                 DPAA_BUS_LOG(ERR, "Unable to alloc fq portal");
286                 return -1;
287         }
288         fq->qp = qp;
289         sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
290         qman_static_dequeue_add(sdqcr, qp);
291
292         return 0;
293 }
294
295 int rte_dpaa_portal_fq_close(struct qman_fq *fq)
296 {
297         return fsl_qman_portal_destroy(fq->qp);
298 }
299
300 void
301 dpaa_portal_finish(void *arg)
302 {
303         struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;
304
305         if (!dpaa_io_portal) {
306                 DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
307                 return;
308         }
309
310         bman_thread_finish();
311         qman_thread_finish();
312
313         pthread_setspecific(dpaa_portal_key, NULL);
314
315         rte_free(dpaa_io_portal);
316         dpaa_io_portal = NULL;
317
318         RTE_PER_LCORE(_dpaa_io) = false;
319 }
320
321 #define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
322 #define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"
323
324 static int
325 rte_dpaa_bus_scan(void)
326 {
327         int ret;
328
329         BUS_INIT_FUNC_TRACE();
330
331         if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
332             (access(DPAA_DEV_PATH2, F_OK) != 0)) {
333                 RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
334                 return 0;
335         }
336
337         /* Load the device-tree driver */
338         ret = of_init();
339         if (ret) {
340                 DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
341                 return -1;
342         }
343
344         /* Get the interface configurations from device-tree */
345         dpaa_netcfg = netcfg_acquire();
346         if (!dpaa_netcfg) {
347                 DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
348                 return -EINVAL;
349         }
350
351         RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");
352
353         if (!dpaa_netcfg->num_ethports) {
354                 DPAA_BUS_LOG(INFO, "no network interfaces available");
355                 /* This is not an error */
356                 return 0;
357         }
358
359         DPAA_BUS_LOG(DEBUG, "Bus: Address of netcfg=%p, Ethports=%d",
360                      dpaa_netcfg, dpaa_netcfg->num_ethports);
361
362 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
363         dump_netcfg(dpaa_netcfg);
364 #endif
365
366         DPAA_BUS_LOG(DEBUG, "Number of devices = %d",
367                      dpaa_netcfg->num_ethports);
368         ret = dpaa_create_device_list();
369         if (ret) {
370                 DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
371                 return ret;
372         }
373
374         /* Create the key, supplying a function that will be invoked
375          * when a portal-affined thread is deleted.
376          */
377         ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
378         if (ret) {
379                 DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
380                 dpaa_clean_device_list();
381                 return ret;
382         }
383
384         DPAA_BUS_LOG(DEBUG, "dpaa_portal_key=%u, ret=%d",
385                     (unsigned int)dpaa_portal_key, ret);
386
387         return 0;
388 }
389
390 /* register a dpaa bus based dpaa driver */
391 void
392 rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
393 {
394         RTE_VERIFY(driver);
395
396         BUS_INIT_FUNC_TRACE();
397
398         TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
399         /* Update Bus references */
400         driver->dpaa_bus = &rte_dpaa_bus;
401 }
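
As a hedged illustration of how a PMD reaches this registration path (the names example_dpaa_probe, example_dpaa_pmd and net_example_dpaa are invented for the sketch), a driver fills a struct rte_dpaa_driver and registers it through the RTE_PMD_REGISTER_DPAA() constructor macro from rte_dpaa_bus.h, which in turn calls rte_dpaa_driver_register():

#include <rte_common.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>

/* Illustrative probe callback; a real PMD would create its ethdev here. */
static int
example_dpaa_probe(struct rte_dpaa_driver *drv __rte_unused,
		   struct rte_dpaa_device *dev)
{
	DPAA_BUS_INFO("probing DPAA device %s", dev->name);
	return 0;
}

static struct rte_dpaa_driver example_dpaa_pmd = {
	.drv_type = FSL_DPAA_ETH,
	.probe = example_dpaa_probe,
};

/* Sets example_dpaa_pmd.driver.name and calls rte_dpaa_driver_register(). */
RTE_PMD_REGISTER_DPAA(net_example_dpaa, example_dpaa_pmd);
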
402
403 /* un-register a dpaa bus based dpaa driver */
404 void
405 rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
406 {
407         struct rte_dpaa_bus *dpaa_bus;
408
409         BUS_INIT_FUNC_TRACE();
410
411         dpaa_bus = driver->dpaa_bus;
412
413         TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
414         /* Update Bus references */
415         driver->dpaa_bus = NULL;
416 }
417
418 static int
419 rte_dpaa_device_match(struct rte_dpaa_driver *drv,
420                       struct rte_dpaa_device *dev)
421 {
422         int ret = -1;
423
424         BUS_INIT_FUNC_TRACE();
425
426         if (!drv || !dev) {
427                 DPAA_BUS_DEBUG("Invalid drv or dev received.");
428                 return ret;
429         }
430
431         if (drv->drv_type == dev->device_type) {
432                 DPAA_BUS_INFO("Device: %s matches for driver: %s",
433                               dev->name, drv->driver.name);
434                 ret = 0; /* Found a match */
435         }
436
437         return ret;
438 }
439
440 static int
441 rte_dpaa_bus_probe(void)
442 {
443         int ret = -1;
444         struct rte_dpaa_device *dev;
445         struct rte_dpaa_driver *drv;
446         FILE *svr_file = NULL;
447         unsigned int svr_ver;
448
449         BUS_INIT_FUNC_TRACE();
450
451         /* For each registered driver, and device, call the driver->probe */
452         TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
453                 TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
454                         ret = rte_dpaa_device_match(drv, dev);
455                         if (ret)
456                                 continue;
457
458                         if (!drv->probe)
459                                 continue;
460
461                         ret = drv->probe(drv, dev);
462                         if (ret)
463                                 DPAA_BUS_ERR("Unable to probe %s", dev->name);
464                         break;
465                 }
466         }
467         rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);
468
469         svr_file = fopen(DPAA_SOC_ID_FILE, "r");
470         if (svr_file) {
471                 if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
472                         dpaa_svr_family = svr_ver & SVR_MASK;
473                 fclose(svr_file);
474         }
475
476         return 0;
477 }
478
479 static struct rte_device *
480 rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
481                      const void *data)
482 {
483         struct rte_dpaa_device *dev;
484
485         TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
486                 if (start && &dev->device == start) {
487                         start = NULL;  /* starting point found */
488                         continue;
489                 }
490
491                 if (cmp(&dev->device, data) == 0)
492                         return &dev->device;
493         }
494
495         return NULL;
496 }
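
find_device is the hook the EAL uses for generic device lookups on this bus. A minimal sketch of such a lookup follows (the helper name, the port name "fm1-mac3" and the assumption that the bus registers under the name "dpaa" are all illustrative):

#include <string.h>
#include <rte_bus.h>
#include <rte_dev.h>

/* Illustrative rte_dev_cmp_t implementation: match a device by name. */
static int
example_cmp_dev_name(const struct rte_device *dev, const void *name)
{
	return strcmp(dev->name, name);
}

static struct rte_device *
example_find_fman_port(void)
{
	struct rte_bus *bus = rte_bus_find_by_name("dpaa");

	if (bus == NULL)
		return NULL;
	return bus->find_device(NULL, example_cmp_dev_name, "fm1-mac3");
}
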
497
498 /*
499  * Get the IOMMU class of DPAA devices on the bus.
500  */
501 static enum rte_iova_mode
502 rte_dpaa_get_iommu_class(void)
503 {
504         return RTE_IOVA_PA;
505 }
506
507 struct rte_dpaa_bus rte_dpaa_bus = {
508         .bus = {
509                 .scan = rte_dpaa_bus_scan,
510                 .probe = rte_dpaa_bus_probe,
511                 .find_device = rte_dpaa_find_device,
512                 .get_iommu_class = rte_dpaa_get_iommu_class,
513         },
514         .device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
515         .driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
516         .device_count = 0,
517 };
518
519 RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
520
521 RTE_INIT(dpaa_init_log);
522 static void
523 dpaa_init_log(void)
524 {
525         dpaa_logtype_bus = rte_log_register("bus.dpaa");
526         if (dpaa_logtype_bus >= 0)
527                 rte_log_set_level(dpaa_logtype_bus, RTE_LOG_NOTICE);
528
529         dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
530         if (dpaa_logtype_mempool >= 0)
531                 rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
532
533         dpaa_logtype_pmd = rte_log_register("pmd.dpaa");
534         if (dpaa_logtype_pmd >= 0)
535                 rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);
536
537         dpaa_logtype_eventdev = rte_log_register("eventdev.dpaa");
538         if (dpaa_logtype_eventdev >= 0)
539                 rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE);
540 }
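
The log types registered above default to NOTICE. A small sketch of how an application could raise one of them to DEBUG at run time, assuming rte_log_register() returns the id of an already-registered name; the same effect is normally achieved with the EAL --log-level option:

#include <rte_log.h>

/* Illustrative helper: bump the DPAA bus log type to DEBUG. */
static void
example_enable_dpaa_bus_debug(void)
{
	int logtype = rte_log_register("bus.dpaa");

	if (logtype >= 0)
		rte_log_set_level(logtype, RTE_LOG_DEBUG);
}
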