+dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
+{
+ int comp, inserted = 0;
+ struct rte_dpaa_device *dev = NULL;
+ struct rte_dpaa_device *tdev = NULL;
+
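+ /* Keep the list sorted: place newdev before the first existing
+ * entry that compares greater than it.
+ */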
+ TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+ comp = compare_dpaa_devices(newdev, dev);
+ if (comp < 0) {
+ TAILQ_INSERT_BEFORE(dev, newdev, next);
+ inserted = 1;
+ break;
+ }
+ }
+
+ if (!inserted)
+ TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
+}
+
+/*
+ * Checks whether a SEC device is present in the device tree (DTS).
+ * Returns -1 if no SEC device is available, 0 otherwise.
+ */
+static inline int
+dpaa_sec_available(void)
+{
+ const struct device_node *caam_node;
+
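+ /* A single "fsl,sec-v4.0" compatible node is enough to report
+ * SEC availability.
+ */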
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ return 0;
+ }
+
+ return -1;
+}
+
+static void dpaa_clean_device_list(void);
+
+static struct rte_devargs *
+dpaa_devargs_lookup(struct rte_dpaa_device *dev)
+{
+ struct rte_devargs *devargs;
+ char dev_name[32];
+
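+ /* Walk the devargs registered for the dpaa bus and return the
+ * entry whose parsed name matches this device.
+ */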
+ RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
+ if (devargs->bus->parse(devargs->name, &dev_name) != 0)
+ continue;
+ if (strcmp(dev_name, dev->device.name) == 0) {
+ DPAA_BUS_INFO("**Devargs matched %s", dev_name);
+ return devargs;
+ }
+ }
+ return NULL;
+}
+
+static int
+dpaa_create_device_list(void)
+{
+ int i;
+ int ret;
+ struct rte_dpaa_device *dev;
+ struct fm_eth_port_cfg *cfg;
+ struct fman_if *fman_intf;
+
+ /* Creating Ethernet Devices */
+ for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
+ dev = calloc(1, sizeof(struct rte_dpaa_device));
+ if (!dev) {
+ DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ dev->device.bus = &rte_dpaa_bus.bus;
+
+ cfg = &dpaa_netcfg->port_cfg[i];
+ fman_intf = cfg->fman_if;
+
+ /* Device identifiers */
+ dev->id.fman_id = fman_intf->fman_idx + 1;
+ dev->id.mac_id = fman_intf->mac_idx;
+ dev->device_type = FSL_DPAA_ETH;
+ dev->id.dev_id = i;
+
+ /* Create device name */
+ memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
+ snprintf(dev->name, RTE_ETH_NAME_MAX_LEN, "fm%d-mac%d",
+ (fman_intf->fman_idx + 1), fman_intf->mac_idx);
+ DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
+ dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
+
+ dpaa_add_to_device_list(dev);
+ }
+
+ rte_dpaa_bus.device_count = i;
+
+ /* Unlike the ETH case, a fixed number (RTE_LIBRTE_DPAA_MAX_CRYPTODEV)
+ * of SEC devices is created, and only if the "sec" property is found
+ * in the device tree. Logically there is no limit on the number of
+ * devices (QI interfaces) that can be created.
+ */
+
+ if (dpaa_sec_available()) {
+ DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
+ return 0;
+ }
+
+ /* Creating SEC Devices */
+ for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
+ dev = calloc(1, sizeof(struct rte_dpaa_device));
+ if (!dev) {
+ DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ dev->device_type = FSL_DPAA_CRYPTO;
+ dev->id.dev_id = rte_dpaa_bus.device_count + i;
+
+ /* Even though RTE_CRYPTODEV_NAME_MAX_LEN is the valid length for a
+ * crypto PMD name, RTE_ETH_NAME_MAX_LEN is used here as that is the
+ * size allocated for dev->name.
+ */
+ memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
+ snprintf(dev->name, RTE_ETH_NAME_MAX_LEN, "dpaa-sec%d", i);
+ DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
+ dev->device.name = dev->name;
+ dev->device.devargs = dpaa_devargs_lookup(dev);
+
+ dpaa_add_to_device_list(dev);
+ }
+
+ rte_dpaa_bus.device_count += i;
+
+ return 0;
+
+cleanup:
+ dpaa_clean_device_list();
+ return ret;
+}
+
+static void
+dpaa_clean_device_list(void)
+{
+ struct rte_dpaa_device *dev = NULL;
+ struct rte_dpaa_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+ TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
+ free(dev);
+ dev = NULL;
+ }
+}
+
+int rte_dpaa_portal_init(void *arg)
+{
+ cpu_set_t cpuset;
+ pthread_t id;
+ uint32_t cpu = rte_lcore_id();
+ int ret;
+ struct dpaa_portal *dpaa_io_portal;
+
+ BUS_INIT_FUNC_TRACE();
+
+ if ((size_t)arg == 1 || cpu == LCORE_ID_ANY)
+ cpu = rte_get_master_lcore();
+ else if (cpu >= RTE_MAX_LCORE)
+ /* The core id is not supported */
+ return -1;
+
+ /* Set CPU affinity for this thread */
+ CPU_ZERO(&cpuset);
+ CPU_SET(cpu, &cpuset);
+ id = pthread_self();
+ ret = pthread_setaffinity_np(id, sizeof(cpu_set_t), &cpuset);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "pthread_setaffinity_np failed on "
+ "core :%d with ret: %d", cpu, ret);
+ return ret;
+ }
+
+ /* Initialise bman thread portals */
+ ret = bman_thread_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
+ "core %d with ret: %d", cpu, ret);
+ return ret;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "BMAN thread initialized");
+
+ /* Initialise qman thread portals */
+ ret = qman_thread_init();
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "bman_thread_init failed on "
+ "core %d with ret: %d", cpu, ret);
+ bman_thread_finish();
+ return ret;
+ }
+
+ DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
+
+ dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
+ RTE_CACHE_LINE_SIZE);
+ if (!dpaa_io_portal) {
+ DPAA_BUS_LOG(ERR, "Unable to allocate memory");
+ bman_thread_finish();
+ qman_thread_finish();
+ return -ENOMEM;
+ }
+
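+ /* Record the QMAN/BMAN portal indices and the kernel thread id
+ * for this thread's portal.
+ */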
+ dpaa_io_portal->qman_idx = qman_get_portal_index();
+ dpaa_io_portal->bman_idx = bman_get_portal_index();
+ dpaa_io_portal->tid = syscall(SYS_gettid);
+
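+ /* Store the portal under the thread-specific key so it can be
+ * retrieved later for this thread.
+ */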
+ ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
+ if (ret) {
+ DPAA_BUS_LOG(ERR, "pthread_setspecific failed on "
+ "core %d with ret: %d", cpu, ret);
+ dpaa_portal_finish(NULL);
+
+ return ret;
+ }
+
+ RTE_PER_LCORE(dpaa_io) = true;
+
+ DPAA_BUS_LOG(DEBUG, "DPAA portal initialized");
+
+ return 0;
+}
+
+int
+rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
+{
+ /* Affine the portal created above with the channel */
+ u32 sdqcr;
+ struct qman_portal *qp;
+ int ret;
+
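+ /* Lazily initialise the per-lcore portal if this thread has not
+ * set one up yet.
+ */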
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init(arg);
+ if (ret < 0) {
+ DPAA_BUS_LOG(ERR, "portal initialization failure");
+ return ret;
+ }
+ }
+
+ /* Initialise qman specific portals */
+ qp = fsl_qman_portal_create();
+ if (!qp) {
+ DPAA_BUS_LOG(ERR, "Unable to alloc fq portal");
+ return -1;
+ }
+ fq->qp = qp;
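+ /* Add the FQ's pool channel to this portal's static dequeue command */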
+ sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
+ qman_static_dequeue_add(sdqcr, qp);
+
+ return 0;
+}
+
+int rte_dpaa_portal_fq_close(struct qman_fq *fq)
+{
+ return fsl_qman_portal_destroy(fq->qp);
+}
+
+void
+dpaa_portal_finish(void *arg)