[dpdk.git] drivers/bus/dpaa/dpaa_bus.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright 2017-2020 NXP
 *
 */
/* System headers */
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/eventfd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_bus.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mbuf_dyn.h>

#include <dpaa_of.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <dpaax_iova_table.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <fsl_bman.h>
#include <netcfg.h>

static struct rte_dpaa_bus rte_dpaa_bus;
struct netcfg_info *dpaa_netcfg;

/* Define a variable to hold the portal_key, once created. */
static pthread_key_t dpaa_portal_key;

unsigned int dpaa_svr_family;

#define FSL_DPAA_BUS_NAME       dpaa_bus

RTE_DEFINE_PER_LCORE(struct dpaa_portal *, dpaa_io);

#define DPAA_SEQN_DYNFIELD_NAME "dpaa_seqn_dynfield"
int dpaa_seqn_dynfield_offset = -1;

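/* Return the FMAN port configuration for the given device index. */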
struct fm_eth_port_cfg *
dpaa_get_eth_port_cfg(int dev_id)
{
        return &dpaa_netcfg->port_cfg[dev_id];
}

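/* Compare two devices for sorted insertion: order by device type and,
 * for ETH devices, by FMAN id and then MAC id.
 */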
static int
compare_dpaa_devices(struct rte_dpaa_device *dev1,
                     struct rte_dpaa_device *dev2)
{
        int comp = 0;

        /* Segregating ETH from SEC devices */
        if (dev1->device_type > dev2->device_type)
                comp = 1;
        else if (dev1->device_type < dev2->device_type)
                comp = -1;
        else
                comp = 0;

        if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
                return comp;

        if (dev1->id.fman_id > dev2->id.fman_id) {
                comp = 1;
        } else if (dev1->id.fman_id < dev2->id.fman_id) {
                comp = -1;
        } else {
                /* FMAN ids match, check for mac_id */
                if (dev1->id.mac_id > dev2->id.mac_id)
                        comp = 1;
                else if (dev1->id.mac_id < dev2->id.mac_id)
                        comp = -1;
                else
                        comp = 0;
        }

        return comp;
}

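/* Insert a newly scanned device into the bus device list, keeping the
 * list sorted as per compare_dpaa_devices().
 */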
static inline void
dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
{
        int comp, inserted = 0;
        struct rte_dpaa_device *dev = NULL;
        struct rte_dpaa_device *tdev = NULL;

        RTE_TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
                comp = compare_dpaa_devices(newdev, dev);
                if (comp < 0) {
                        TAILQ_INSERT_BEFORE(dev, newdev, next);
                        inserted = 1;
                        break;
                }
        }

        if (!inserted)
                TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
}

/*
 * Checks the device tree (DTS) for a SEC device.
 * Returns -1 if no SEC device is available, 0 otherwise.
 */
static inline int
dpaa_sec_available(void)
{
        const struct device_node *caam_node;

        for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
                return 0;
        }

        return -1;
}

static void dpaa_clean_device_list(void);

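/* Return the devargs entry whose parsed name matches this device, if any. */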
static struct rte_devargs *
dpaa_devargs_lookup(struct rte_dpaa_device *dev)
{
        struct rte_devargs *devargs;
        char dev_name[32];

        RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
                devargs->bus->parse(devargs->name, &dev_name);
                if (strcmp(dev_name, dev->device.name) == 0) {
                        DPAA_BUS_INFO("**Devargs matched %s", dev_name);
                        return devargs;
                }
        }
        return NULL;
}

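/* Walk the acquired network configuration and populate the bus device
 * list with ETH, SEC (crypto) and QDMA devices.
 */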
static int
dpaa_create_device_list(void)
{
        int i;
        int ret;
        struct rte_dpaa_device *dev;
        struct fm_eth_port_cfg *cfg;
        struct fman_if *fman_intf;

        /* Creating Ethernet Devices */
        for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
                dev = calloc(1, sizeof(struct rte_dpaa_device));
                if (!dev) {
                        DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
                        ret = -ENOMEM;
                        goto cleanup;
                }

                dev->device.bus = &rte_dpaa_bus.bus;

                /* Allocate interrupt handle instance */
                dev->intr_handle =
                        rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
                if (dev->intr_handle == NULL) {
                        DPAA_BUS_LOG(ERR, "Failed to allocate intr handle");
                        ret = -ENOMEM;
                        goto cleanup;
                }

                cfg = &dpaa_netcfg->port_cfg[i];
                fman_intf = cfg->fman_if;

                /* Device identifiers */
                dev->id.fman_id = fman_intf->fman_idx + 1;
                dev->id.mac_id = fman_intf->mac_idx;
                dev->device_type = FSL_DPAA_ETH;
                dev->id.dev_id = i;

                /* Create device name */
                memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
                sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
                        fman_intf->mac_idx);
                DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
                dev->device.name = dev->name;
                dev->device.devargs = dpaa_devargs_lookup(dev);

                dpaa_add_to_device_list(dev);
        }

        rte_dpaa_bus.device_count = i;

        /* Unlike the ETH case, a fixed number (RTE_LIBRTE_DPAA_MAX_CRYPTODEV)
         * of SEC devices is created, and only if the "sec" property is found
         * in the device tree. Logically there is no limit on the number of
         * devices (QI interfaces) that can be created.
         */

        if (dpaa_sec_available()) {
                DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
                return 0;
        }

        /* Creating SEC Devices */
        for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
                dev = calloc(1, sizeof(struct rte_dpaa_device));
                if (!dev) {
                        DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
                        ret = -1;
                        goto cleanup;
                }

                /* Allocate interrupt handle instance */
                dev->intr_handle =
                        rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_PRIVATE);
                if (dev->intr_handle == NULL) {
                        DPAA_BUS_LOG(ERR, "Failed to allocate intr handle");
                        ret = -ENOMEM;
                        goto cleanup;
                }

                dev->device_type = FSL_DPAA_CRYPTO;
                dev->id.dev_id = rte_dpaa_bus.device_count + i;

                /* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid name
                 * length for a crypto PMD, RTE_ETH_NAME_MAX_LEN is used here
                 * as that is the size allocated for dev->name.
                 */
                memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
                sprintf(dev->name, "dpaa_sec-%d", i+1);
                DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
                dev->device.name = dev->name;
                dev->device.devargs = dpaa_devargs_lookup(dev);

                dpaa_add_to_device_list(dev);
        }

        rte_dpaa_bus.device_count += i;

        /* Creating QDMA Device */
        for (i = 0; i < RTE_DPAA_QDMA_DEVICES; i++) {
                dev = calloc(1, sizeof(struct rte_dpaa_device));
                if (!dev) {
                        DPAA_BUS_LOG(ERR, "Failed to allocate QDMA device");
                        ret = -1;
                        goto cleanup;
                }

                dev->device_type = FSL_DPAA_QDMA;
                dev->id.dev_id = rte_dpaa_bus.device_count + i;

                memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
                sprintf(dev->name, "dpaa_qdma-%d", i+1);
                DPAA_BUS_LOG(INFO, "%s qdma device added", dev->name);
                dev->device.name = dev->name;
                dev->device.devargs = dpaa_devargs_lookup(dev);

                dpaa_add_to_device_list(dev);
        }
        rte_dpaa_bus.device_count += i;

        return 0;

cleanup:
        dpaa_clean_device_list();
        return ret;
}

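/* Release every device on the bus device list along with its interrupt
 * handle.
 */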
static void
dpaa_clean_device_list(void)
{
        struct rte_dpaa_device *dev = NULL;
        struct rte_dpaa_device *tdev = NULL;

        RTE_TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
                TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
                rte_intr_instance_free(dev->intr_handle);
                free(dev);
                dev = NULL;
        }
}

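/* Initialise the BMAN and QMAN portals for the calling thread and store
 * the per-lcore portal via the pthread key.
 */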
int rte_dpaa_portal_init(void *arg)
{
        static const struct rte_mbuf_dynfield dpaa_seqn_dynfield_desc = {
                .name = DPAA_SEQN_DYNFIELD_NAME,
                .size = sizeof(dpaa_seqn_t),
                .align = __alignof__(dpaa_seqn_t),
        };
        unsigned int cpu, lcore = rte_lcore_id();
        int ret;

        BUS_INIT_FUNC_TRACE();

        if ((size_t)arg == 1 || lcore == LCORE_ID_ANY)
                lcore = rte_get_main_lcore();
        else
                if (lcore >= RTE_MAX_LCORE)
                        return -1;

        cpu = rte_lcore_to_cpu_id(lcore);

        dpaa_seqn_dynfield_offset =
                rte_mbuf_dynfield_register(&dpaa_seqn_dynfield_desc);
        if (dpaa_seqn_dynfield_offset < 0) {
                DPAA_BUS_LOG(ERR, "Failed to register mbuf field for dpaa sequence number\n");
                return -rte_errno;
        }

        /* Initialise bman thread portals */
        ret = bman_thread_init();
        if (ret) {
                DPAA_BUS_LOG(ERR, "bman_thread_init failed on core %u"
                             " (lcore=%u) with ret: %d", cpu, lcore, ret);
                return ret;
        }

        DPAA_BUS_LOG(DEBUG, "BMAN thread initialized - CPU=%d lcore=%d",
                     cpu, lcore);

        /* Initialise qman thread portals */
        ret = qman_thread_init();
        if (ret) {
                DPAA_BUS_LOG(ERR, "qman_thread_init failed on core %u"
                            " (lcore=%u) with ret: %d", cpu, lcore, ret);
                bman_thread_finish();
                return ret;
        }

        DPAA_BUS_LOG(DEBUG, "QMAN thread initialized - CPU=%d lcore=%d",
                     cpu, lcore);

        DPAA_PER_LCORE_PORTAL = rte_malloc(NULL, sizeof(struct dpaa_portal),
                                    RTE_CACHE_LINE_SIZE);
        if (!DPAA_PER_LCORE_PORTAL) {
                DPAA_BUS_LOG(ERR, "Unable to allocate memory");
                bman_thread_finish();
                qman_thread_finish();
                return -ENOMEM;
        }

        DPAA_PER_LCORE_PORTAL->qman_idx = qman_get_portal_index();
        DPAA_PER_LCORE_PORTAL->bman_idx = bman_get_portal_index();
        DPAA_PER_LCORE_PORTAL->tid = rte_gettid();

        ret = pthread_setspecific(dpaa_portal_key,
                                  (void *)DPAA_PER_LCORE_PORTAL);
        if (ret) {
                DPAA_BUS_LOG(ERR, "pthread_setspecific failed on core %u"
                             " (lcore=%u) with ret: %d", cpu, lcore, ret);
                dpaa_portal_finish(NULL);

                return ret;
        }

        DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");

        return 0;
}

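/* Initialise a dedicated QMAN portal for the given frame queue, creating
 * the per-lcore portal first if it does not exist yet.
 */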
int
rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
{
        /* Affine the portal created above with the channel */
        u32 sdqcr;
        int ret;

        if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
                ret = rte_dpaa_portal_init(arg);
                if (ret < 0) {
                        DPAA_BUS_LOG(ERR, "portal initialization failure");
                        return ret;
                }
        }

        /* Initialise qman specific portals */
        ret = fsl_qman_fq_portal_init(fq->qp);
        if (ret) {
                DPAA_BUS_LOG(ERR, "Unable to init fq portal");
                return -1;
        }

        sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
        qman_static_dequeue_add(sdqcr, fq->qp);

        return 0;
}

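/* Destroy the dedicated QMAN portal associated with the frame queue. */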
int rte_dpaa_portal_fq_close(struct qman_fq *fq)
{
        return fsl_qman_fq_portal_destroy(fq->qp);
}

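/* Tear down the per-thread BMAN/QMAN portals; also used as the destructor
 * of the portal pthread key.
 */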
void
dpaa_portal_finish(void *arg)
{
        struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;

        if (!dpaa_io_portal) {
                DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
                return;
        }

        bman_thread_finish();
        qman_thread_finish();

        pthread_setspecific(dpaa_portal_key, NULL);

        rte_free(dpaa_io_portal);
        dpaa_io_portal = NULL;
        DPAA_PER_LCORE_PORTAL = NULL;
}

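/* Parse a DPAA device name of the form fm<X>-mac<Y>, optionally prefixed
 * with "dpaa_bus:" or "name=".
 */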
static int
rte_dpaa_bus_parse(const char *name, void *out)
{
        unsigned int i, j;
        size_t delta;

        /* There are two ways of passing the device name: with a separator,
         * e.g. "dpaa_bus:fm1-mac3", and without one, e.g. "fm1-mac3".
         * Both need to be handled.
         * It is also possible that "name=fm1-mac3" is passed along.
         */
        DPAA_BUS_DEBUG("Parse device name (%s)", name);

        delta = 0;
        if (strncmp(name, "dpaa_bus:", 9) == 0) {
                delta = 9;
        } else if (strncmp(name, "name=", 5) == 0) {
                delta = 5;
        }

        if (sscanf(&name[delta], "fm%u-mac%u", &i, &j) != 2 ||
            i >= 2 || j >= 16) {
                return -EINVAL;
        }

        if (out != NULL) {
                char *out_name = out;
                const size_t max_name_len = sizeof("fm.-mac..") - 1;

                /* Do not check for truncation, either name ends with
                 * '\0' or the device name is followed by parameters and there
                 * will be a ',' instead. Not copying past this comma is not an
                 * error.
                 */
                strlcpy(out_name, &name[delta], max_name_len + 1);

                /* Second digit of mac%u could instead be ','. */
                if ((strlen(out_name) == max_name_len) &&
                    out_name[max_name_len - 1] == ',')
                        out_name[max_name_len - 1] = '\0';
        }

        return 0;
}

#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
#define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"

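/* Bus scan: check for DPAA platform nodes in sysfs and, if present,
 * register the destructor for the portal pthread key.
 */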
static int
rte_dpaa_bus_scan(void)
{
        int ret;

        BUS_INIT_FUNC_TRACE();

        if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
            (access(DPAA_DEV_PATH2, F_OK) != 0)) {
                RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
                return 0;
        }

        if (rte_dpaa_bus.detected)
                return 0;

        rte_dpaa_bus.detected = 1;

        /* Create the key, supplying a function that will be invoked
         * when a portal-affined thread is deleted.
         */
        ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
        if (ret) {
                DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
                dpaa_clean_device_list();
                return ret;
        }

        return 0;
}

/* register a dpaa bus based dpaa driver */
void
rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
{
        RTE_VERIFY(driver);

        BUS_INIT_FUNC_TRACE();

        TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
        /* Update Bus references */
        driver->dpaa_bus = &rte_dpaa_bus;
}

/* un-register a dpaa bus based dpaa driver */
void
rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
{
        struct rte_dpaa_bus *dpaa_bus;

        BUS_INIT_FUNC_TRACE();

        dpaa_bus = driver->dpaa_bus;

        TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
        /* Update Bus references */
        driver->dpaa_bus = NULL;
}

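/* A driver matches a device when their DPAA device types are identical. */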
static int
rte_dpaa_device_match(struct rte_dpaa_driver *drv,
                      struct rte_dpaa_device *dev)
{
        if (!drv || !dev) {
                DPAA_BUS_DEBUG("Invalid drv or dev received.");
                return -1;
        }

        if (drv->drv_type == dev->device_type)
                return 0;

        return -1;
}

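/* Load the device-tree driver, acquire the network configuration and
 * build the bus device list from it.
 */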
static int
rte_dpaa_bus_dev_build(void)
{
        int ret;

        /* Load the device-tree driver */
        ret = of_init();
        if (ret) {
                DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
                return -1;
        }

        /* Get the interface configurations from device-tree */
        dpaa_netcfg = netcfg_acquire();
        if (!dpaa_netcfg) {
                DPAA_BUS_LOG(ERR,
                        "netcfg failed: /dev/fsl_usdpaa device not available");
                DPAA_BUS_WARN(
                        "Check if you are using USDPAA based device tree");
                return -EINVAL;
        }

        RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");

        if (!dpaa_netcfg->num_ethports) {
                DPAA_BUS_LOG(INFO, "NO DPDK mapped net interfaces available");
                /* This is not an error */
        }

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
        dump_netcfg(dpaa_netcfg);
#endif

        DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d",
                     dpaa_netcfg->num_ethports);
        ret = dpaa_create_device_list();
        if (ret) {
                DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
                return ret;
        }
        return 0;
}

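/* Create an eventfd backed, external interrupt handle for a device. */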
static int rte_dpaa_setup_intr(struct rte_intr_handle *intr_handle)
{
        int fd;

        fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
        if (fd < 0) {
                DPAA_BUS_ERR("Cannot set up eventfd, error %i (%s)",
                             errno, strerror(errno));
                return errno;
        }

        if (rte_intr_fd_set(intr_handle, fd))
                return rte_errno;

        if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_EXT))
                return rte_errno;

        return 0;
}

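/* Bus probe: build the device list once, initialise QMAN/BMAN, read the
 * SoC version, set up ETH interrupts and match registered drivers against
 * devices.
 */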
static int
rte_dpaa_bus_probe(void)
{
        int ret = -1;
        struct rte_dpaa_device *dev;
        struct rte_dpaa_driver *drv;
        FILE *svr_file = NULL;
        unsigned int svr_ver;
        int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_ALLOWLIST;
        static int process_once;

        /* If DPAA bus is not present nothing needs to be done */
        if (!rte_dpaa_bus.detected)
                return 0;

        /* Device list creation is only done once */
        if (!process_once) {
                rte_dpaa_bus_dev_build();
                /* One time load of Qman/Bman drivers */
                ret = qman_global_init();
                if (ret) {
                        DPAA_BUS_ERR("QMAN initialization failed: %d",
                                     ret);
                        return ret;
                }
                ret = bman_global_init();
                if (ret) {
                        DPAA_BUS_ERR("BMAN initialization failed: %d",
                                     ret);
                        return ret;
                }
        }
        process_once = 1;

        /* If no device present on DPAA bus nothing needs to be done */
        if (TAILQ_EMPTY(&rte_dpaa_bus.device_list))
                return 0;

        svr_file = fopen(DPAA_SOC_ID_FILE, "r");
        if (svr_file) {
                if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
                        dpaa_svr_family = svr_ver & SVR_MASK;
                fclose(svr_file);
        }

        TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
                if (dev->device_type == FSL_DPAA_ETH) {
                        ret = rte_dpaa_setup_intr(dev->intr_handle);
                        if (ret)
                                DPAA_BUS_ERR("Error setting up interrupt.\n");
                }
        }

        /* And initialize the PA->VA translation table */
        dpaax_iova_table_populate();

        /* For each registered driver, and device, call the driver->probe */
        TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
                TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
                        ret = rte_dpaa_device_match(drv, dev);
                        if (ret)
                                continue;

                        if (rte_dev_is_probed(&dev->device))
                                continue;

                        if (!drv->probe ||
                            (dev->device.devargs &&
                             dev->device.devargs->policy == RTE_DEV_BLOCKED))
                                continue;

                        if (probe_all ||
                            (dev->device.devargs &&
                             dev->device.devargs->policy == RTE_DEV_ALLOWED)) {
                                ret = drv->probe(drv, dev);
                                if (ret) {
                                        DPAA_BUS_ERR("unable to probe:%s",
                                                     dev->name);
                                } else {
                                        dev->driver = drv;
                                        dev->device.driver = &drv->driver;
                                }
                        }
                        break;
                }
        }

        /* Register DPAA mempool ops only if any DPAA device has
         * been detected.
         */
        rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);

        return 0;
}

static struct rte_device *
rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
                     const void *data)
{
        struct rte_dpaa_device *dev;
        const struct rte_dpaa_device *dstart;

        /* find_device is called with 'data' as an opaque object - just call
         * cmp with this and each device object on bus.
         */

        if (start != NULL) {
                dstart = RTE_DEV_TO_DPAA_CONST(start);
                dev = TAILQ_NEXT(dstart, next);
        } else {
                dev = TAILQ_FIRST(&rte_dpaa_bus.device_list);
        }

        while (dev != NULL) {
                if (cmp(&dev->device, data) == 0) {
                        DPAA_BUS_DEBUG("Found dev=(%s)\n", dev->device.name);
                        return &dev->device;
                }
                dev = TAILQ_NEXT(dev, next);
        }

        DPAA_BUS_DEBUG("Unable to find any device\n");
        return NULL;
}

/*
 * Get the IOMMU class of DPAA devices on the bus.
 */
static enum rte_iova_mode
rte_dpaa_get_iommu_class(void)
{
        if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
            (access(DPAA_DEV_PATH2, F_OK) != 0)) {
                return RTE_IOVA_DC;
        }
        return RTE_IOVA_PA;
}

static int
dpaa_bus_plug(struct rte_device *dev __rte_unused)
{
        /* No operation is performed while plugging the device */
        return 0;
}

static int
dpaa_bus_unplug(struct rte_device *dev __rte_unused)
{
        /* No operation is performed while unplugging the device */
        return 0;
}

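/* Device iterator for the "name=<device_name>" filter used by the EAL
 * device iteration API.
 */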
static void *
dpaa_bus_dev_iterate(const void *start, const char *str,
                     const struct rte_dev_iterator *it __rte_unused)
{
        const struct rte_dpaa_device *dstart;
        struct rte_dpaa_device *dev;
        char *dup, *dev_name = NULL;

        if (str == NULL) {
                DPAA_BUS_DEBUG("No device string");
                return NULL;
        }

        /* The device string is expected to be of the form name=device_name */
        if (strncmp(str, "name=", 5) != 0) {
                DPAA_BUS_DEBUG("Invalid device string (%s)\n", str);
                return NULL;
        }

        /* Now that name=device_name format is available, split */
        dup = strdup(str);
        if (dup == NULL)
                return NULL;
        dev_name = dup + strlen("name=");

        if (start != NULL) {
                dstart = RTE_DEV_TO_DPAA_CONST(start);
                dev = TAILQ_NEXT(dstart, next);
        } else {
                dev = TAILQ_FIRST(&rte_dpaa_bus.device_list);
        }

        while (dev != NULL) {
                if (strcmp(dev->device.name, dev_name) == 0) {
                        free(dup);
                        return &dev->device;
                }
                dev = TAILQ_NEXT(dev, next);
        }

        free(dup);
        return NULL;
}

static struct rte_dpaa_bus rte_dpaa_bus = {
        .bus = {
                .scan = rte_dpaa_bus_scan,
                .probe = rte_dpaa_bus_probe,
                .parse = rte_dpaa_bus_parse,
                .find_device = rte_dpaa_find_device,
                .get_iommu_class = rte_dpaa_get_iommu_class,
                .plug = dpaa_bus_plug,
                .unplug = dpaa_bus_unplug,
                .dev_iterate = dpaa_bus_dev_iterate,
        },
        .device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
        .driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
        .device_count = 0,
};

RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_bus, NOTICE);