drivers/bus/dpaa/dpaa_bus.c

/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/syscall.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_bus.h>
#include <rte_mbuf_pool_ops.h>

#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <of.h>
#include <netcfg.h>

int dpaa_logtype_bus;
int dpaa_logtype_mempool;
int dpaa_logtype_pmd;
int dpaa_logtype_eventdev;

struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;

/* define a variable to hold the portal_key, once created. */
pthread_key_t dpaa_portal_key;

unsigned int dpaa_svr_family;

RTE_DEFINE_PER_LCORE(bool, dpaa_io);
RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);

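/*
 * Compare two DPAA devices for sorted insertion: devices are ordered by
 * device type first and, for ETH devices, by FMAN index and then MAC index.
 */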
static int
compare_dpaa_devices(struct rte_dpaa_device *dev1,
                     struct rte_dpaa_device *dev2)
{
        int comp = 0;

        /* Segregating ETH from SEC devices */
        if (dev1->device_type > dev2->device_type)
                comp = 1;
        else if (dev1->device_type < dev2->device_type)
                comp = -1;
        else
                comp = 0;

        if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
                return comp;

        if (dev1->id.fman_id > dev2->id.fman_id) {
                comp = 1;
        } else if (dev1->id.fman_id < dev2->id.fman_id) {
                comp = -1;
        } else {
                /* FMAN ids match, check for mac_id */
                if (dev1->id.mac_id > dev2->id.mac_id)
                        comp = 1;
                else if (dev1->id.mac_id < dev2->id.mac_id)
                        comp = -1;
                else
                        comp = 0;
        }

        return comp;
}

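/*
 * Insert a newly scanned device into the bus device list, keeping the list
 * sorted according to compare_dpaa_devices().
 */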
static inline void
dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
{
        int comp, inserted = 0;
        struct rte_dpaa_device *dev = NULL;
        struct rte_dpaa_device *tdev = NULL;

        TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
                comp = compare_dpaa_devices(newdev, dev);
                if (comp < 0) {
                        TAILQ_INSERT_BEFORE(dev, newdev, next);
                        inserted = 1;
                        break;
                }
        }

        if (!inserted)
                TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
}

/*
 * Looks for a SEC node in the device tree.
 * Returns 0 if a SEC device is available, -1 otherwise.
 */
static inline int
dpaa_sec_available(void)
{
        const struct device_node *caam_node;

        for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
                return 0;
        }

        return -1;
}

static void dpaa_clean_device_list(void);

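/*
 * Walk the netcfg port configuration and populate the bus with ETH devices,
 * then add the SEC (crypto) devices if a SEC node is present in the
 * device tree.
 */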
static int
dpaa_create_device_list(void)
{
        int i;
        int ret;
        struct rte_dpaa_device *dev;
        struct fm_eth_port_cfg *cfg;
        struct fman_if *fman_intf;

        /* Creating Ethernet Devices */
        for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
                dev = calloc(1, sizeof(struct rte_dpaa_device));
                if (!dev) {
                        DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
                        ret = -ENOMEM;
                        goto cleanup;
                }

                cfg = &dpaa_netcfg->port_cfg[i];
                fman_intf = cfg->fman_if;

                /* Device identifiers */
                dev->id.fman_id = fman_intf->fman_idx + 1;
                dev->id.mac_id = fman_intf->mac_idx;
                dev->device_type = FSL_DPAA_ETH;
                dev->id.dev_id = i;

                /* Create device name */
                memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
                sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
                        fman_intf->mac_idx);
                DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);
                dev->device.name = dev->name;

                dpaa_add_to_device_list(dev);
        }

        rte_dpaa_bus.device_count = i;

        /* Unlike the ETH case, a fixed number of SEC devices
         * (RTE_LIBRTE_DPAA_MAX_CRYPTODEV) is created, and only if a "sec"
         * node is found in the device tree. Logically there is no limit on
         * the number of devices (QI interfaces) that can be created.
         */

        if (dpaa_sec_available()) {
                DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
                return 0;
        }

        /* Creating SEC Devices */
        for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
                dev = calloc(1, sizeof(struct rte_dpaa_device));
                if (!dev) {
                        DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
                        ret = -1;
                        goto cleanup;
                }

                dev->device_type = FSL_DPAA_CRYPTO;
                dev->id.dev_id = rte_dpaa_bus.device_count + i;

                /* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid name
                 * length for a crypto PMD, RTE_ETH_NAME_MAX_LEN is used here
                 * as that is the size allocated for dev->name.
                 */
                memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
                sprintf(dev->name, "dpaa-sec%d", i);
                DPAA_BUS_LOG(DEBUG, "Device added: %s", dev->name);

                dpaa_add_to_device_list(dev);
        }

        rte_dpaa_bus.device_count += i;

        return 0;

cleanup:
        dpaa_clean_device_list();
        return ret;
}

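/* Remove and free every device that was added to the bus device list. */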
static void
dpaa_clean_device_list(void)
{
        struct rte_dpaa_device *dev = NULL;
        struct rte_dpaa_device *tdev = NULL;

        TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
                TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
                free(dev);
                dev = NULL;
        }
}

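/*
 * Per-thread portal initialization: pin the calling thread to an lcore,
 * open BMAN and QMAN software portals for it, and remember the portal
 * details in thread-local storage via dpaa_portal_key.
 */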
int rte_dpaa_portal_init(void *arg)
{
        cpu_set_t cpuset;
        pthread_t id;
        uint32_t cpu = rte_lcore_id();
        int ret;
        struct dpaa_portal *dpaa_io_portal;

        BUS_INIT_FUNC_TRACE();

        if ((size_t)arg == 1 || cpu == LCORE_ID_ANY)
                cpu = rte_get_master_lcore();
        else if (cpu >= RTE_MAX_LCORE)
                /* The core id is not supported */
                return -1;

        /* Set CPU affinity for this thread */
        CPU_ZERO(&cpuset);
        CPU_SET(cpu, &cpuset);
        id = pthread_self();
        ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
        if (ret) {
                DPAA_BUS_LOG(ERR, "pthread_setaffinity_np failed on "
                        "core: %d with ret: %d", cpu, ret);
                return ret;
        }

        /* Initialise bman thread portals */
        ret = bman_thread_init();
        if (ret) {
                DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
                        "core %d with ret: %d", cpu, ret);
                return ret;
        }

        DPAA_BUS_LOG(DEBUG, "BMAN thread initialized");

        /* Initialise qman thread portals */
        ret = qman_thread_init();
        if (ret) {
                DPAA_BUS_LOG(ERR, "qman_thread_init failed on "
                        "core %d with ret: %d", cpu, ret);
                bman_thread_finish();
                return ret;
        }

        DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");

        dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
                                    RTE_CACHE_LINE_SIZE);
        if (!dpaa_io_portal) {
                DPAA_BUS_LOG(ERR, "Unable to allocate memory");
                bman_thread_finish();
                qman_thread_finish();
                return -ENOMEM;
        }

        dpaa_io_portal->qman_idx = qman_get_portal_index();
        dpaa_io_portal->bman_idx = bman_get_portal_index();
        dpaa_io_portal->tid = syscall(SYS_gettid);

        ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
        if (ret) {
                DPAA_BUS_LOG(ERR, "pthread_setspecific failed on "
                            "core %d with ret: %d", cpu, ret);
                dpaa_portal_finish(NULL);

                return ret;
        }

        RTE_PER_LCORE(dpaa_io) = true;

        DPAA_BUS_LOG(DEBUG, "DPAA portal initialized");

        return 0;
}

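/*
 * Give the frame queue its own QMAN portal: make sure the calling thread's
 * portal is initialized, create a dedicated portal for the FQ and subscribe
 * it to the FQ's pool channel via the static dequeue command.
 */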
int
rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
{
        /* Affine the portal created above with the channel */
        u32 sdqcr;
        struct qman_portal *qp;
        int ret;

        if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
                ret = rte_dpaa_portal_init(arg);
                if (ret < 0) {
                        DPAA_BUS_LOG(ERR, "portal initialization failure");
                        return ret;
                }
        }

        /* Initialise qman specific portals */
        qp = fsl_qman_portal_create();
        if (!qp) {
                DPAA_BUS_LOG(ERR, "Unable to alloc fq portal");
                return -1;
        }
        fq->qp = qp;
        sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
        qman_static_dequeue_add(sdqcr, qp);

        return 0;
}

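/* Release the dedicated QMAN portal associated with the frame queue. */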
int rte_dpaa_portal_fq_close(struct qman_fq *fq)
{
        return fsl_qman_portal_destroy(fq->qp);
}

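/*
 * Destructor registered with dpaa_portal_key: tears down the BMAN/QMAN
 * portals of the exiting thread and frees its per-thread portal state.
 */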
void
dpaa_portal_finish(void *arg)
{
        struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;

        if (!dpaa_io_portal) {
                DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
                return;
        }

        bman_thread_finish();
        qman_thread_finish();

        pthread_setspecific(dpaa_portal_key, NULL);

        rte_free(dpaa_io_portal);
        dpaa_io_portal = NULL;

        RTE_PER_LCORE(dpaa_io) = false;
}

#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"

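/*
 * Bus scan: check sysfs for a DPAA platform node, load the device-tree
 * driver, acquire the network configuration, build the device list and
 * create the pthread key used for per-thread portal cleanup.
 */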
static int
rte_dpaa_bus_scan(void)
{
        int ret;

        BUS_INIT_FUNC_TRACE();

        if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
            (access(DPAA_DEV_PATH2, F_OK) != 0)) {
                RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
                return 0;
        }

        /* Load the device-tree driver */
        ret = of_init();
        if (ret) {
                DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
                return -1;
        }

        /* Get the interface configurations from device-tree */
        dpaa_netcfg = netcfg_acquire();
        if (!dpaa_netcfg) {
                DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
                return -EINVAL;
        }

        RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");

        if (!dpaa_netcfg->num_ethports) {
                DPAA_BUS_LOG(INFO, "no network interfaces available");
                /* This is not an error */
                return 0;
        }

        DPAA_BUS_LOG(DEBUG, "Bus: Address of netcfg=%p, Ethports=%d",
                     dpaa_netcfg, dpaa_netcfg->num_ethports);

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
        dump_netcfg(dpaa_netcfg);
#endif

        DPAA_BUS_LOG(DEBUG, "Number of devices = %d\n",
                     dpaa_netcfg->num_ethports);
        ret = dpaa_create_device_list();
        if (ret) {
                DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
                return ret;
        }

        /* Create the key, supplying a destructor that is invoked when a
         * portal-affined thread exits.
         */
        ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
        if (ret) {
                DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
                dpaa_clean_device_list();
                return ret;
        }

        DPAA_BUS_LOG(DEBUG, "dpaa_portal_key=%u, ret=%d\n",
                    (unsigned int)dpaa_portal_key, ret);

        return 0;
}

/* Register a DPAA bus based driver */
void
rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
{
        RTE_VERIFY(driver);

        BUS_INIT_FUNC_TRACE();

        TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
        /* Update Bus references */
        driver->dpaa_bus = &rte_dpaa_bus;
}

/* Unregister a DPAA bus based driver */
void
rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
{
        struct rte_dpaa_bus *dpaa_bus;

        BUS_INIT_FUNC_TRACE();

        dpaa_bus = driver->dpaa_bus;

        TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
        /* Update Bus references */
        driver->dpaa_bus = NULL;
}

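/* A driver matches a device when the driver type equals the device type. */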
static int
rte_dpaa_device_match(struct rte_dpaa_driver *drv,
                      struct rte_dpaa_device *dev)
{
        int ret = -1;

        BUS_INIT_FUNC_TRACE();

        if (!drv || !dev) {
                DPAA_BUS_DEBUG("Invalid drv or dev received.");
                return ret;
        }

        if (drv->drv_type == dev->device_type) {
                DPAA_BUS_INFO("Device: %s matches for driver: %s",
                              dev->name, drv->driver.name);
                ret = 0; /* Found a match */
        }

        return ret;
}

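/*
 * Bus probe: match every scanned device against every registered driver and
 * invoke the driver's probe callback on the first match.
 */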
static int
rte_dpaa_bus_probe(void)
{
        int ret = -1;
        struct rte_dpaa_device *dev;
        struct rte_dpaa_driver *drv;
        FILE *svr_file = NULL;
        unsigned int svr_ver;

        BUS_INIT_FUNC_TRACE();

        /* For each registered driver, and device, call the driver->probe */
        TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
                TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
                        ret = rte_dpaa_device_match(drv, dev);
                        if (ret)
                                continue;

                        if (!drv->probe)
                                continue;

                        ret = drv->probe(drv, dev);
                        if (ret)
                                DPAA_BUS_ERR("Unable to probe.\n");

                        break;
                }
        }

        /* Register DPAA mempool ops only if any DPAA device has
         * been detected.
         */
        if (!TAILQ_EMPTY(&rte_dpaa_bus.device_list))
                rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);

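        /* Identify the SoC family from the SVR value exposed in sysfs;
         * dpaa_svr_family is used by DPAA drivers for SoC-specific handling.
         */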
        svr_file = fopen(DPAA_SOC_ID_FILE, "r");
        if (svr_file) {
                if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
                        dpaa_svr_family = svr_ver & SVR_MASK;
                fclose(svr_file);
        }

        return 0;
}

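/*
 * Iterate the bus device list, optionally resuming after 'start', and return
 * the first device accepted by the comparison callback.
 */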
static struct rte_device *
rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
                     const void *data)
{
        struct rte_dpaa_device *dev;

        TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
                if (start && &dev->device == start) {
                        start = NULL;  /* starting point found */
                        continue;
                }

                if (cmp(&dev->device, data) == 0)
                        return &dev->device;
        }

        return NULL;
}

/*
 * Get the IOMMU class of DPAA devices on the bus.
 */
static enum rte_iova_mode
rte_dpaa_get_iommu_class(void)
{
        if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
            (access(DPAA_DEV_PATH2, F_OK) != 0)) {
                return RTE_IOVA_DC;
        }
        return RTE_IOVA_PA;
}

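/*
 * The DPAA bus instance; its scan/probe/find_device/get_iommu_class hooks
 * are registered with the EAL through RTE_REGISTER_BUS below.
 */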
struct rte_dpaa_bus rte_dpaa_bus = {
        .bus = {
                .scan = rte_dpaa_bus_scan,
                .probe = rte_dpaa_bus_probe,
                .find_device = rte_dpaa_find_device,
                .get_iommu_class = rte_dpaa_get_iommu_class,
        },
        .device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
        .driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
        .device_count = 0,
};

RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);

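/* Register the DPAA log types, defaulting each to the NOTICE level. */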
RTE_INIT(dpaa_init_log);
static void
dpaa_init_log(void)
{
        dpaa_logtype_bus = rte_log_register("bus.dpaa");
        if (dpaa_logtype_bus >= 0)
                rte_log_set_level(dpaa_logtype_bus, RTE_LOG_NOTICE);

        dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
        if (dpaa_logtype_mempool >= 0)
                rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);

        dpaa_logtype_pmd = rte_log_register("pmd.dpaa");
        if (dpaa_logtype_pmd >= 0)
                rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);

        dpaa_logtype_eventdev = rte_log_register("eventdev.dpaa");
        if (dpaa_logtype_eventdev >= 0)
                rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE);
}