#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
-#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
unsigned int dpaa_svr_family;
-RTE_DEFINE_PER_LCORE(bool, _dpaa_io);
+RTE_DEFINE_PER_LCORE(bool, dpaa_io);
RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
-static inline void
-dpaa_add_to_device_list(struct rte_dpaa_device *dev)
+static int
+compare_dpaa_devices(struct rte_dpaa_device *dev1,
+ struct rte_dpaa_device *dev2)
{
- TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, dev, next);
+ int comp = 0;
+
+ /* Segregating ETH from SEC devices */
+ if (dev1->device_type > dev2->device_type)
+ comp = 1;
+ else if (dev1->device_type < dev2->device_type)
+ comp = -1;
+ else
+ comp = 0;
+
+ if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
+ return comp;
+
+ if (dev1->id.fman_id > dev2->id.fman_id) {
+ comp = 1;
+ } else if (dev1->id.fman_id < dev2->id.fman_id) {
+ comp = -1;
+ } else {
+ /* FMAN ids match, check for mac_id */
+ if (dev1->id.mac_id > dev2->id.mac_id)
+ comp = 1;
+ else if (dev1->id.mac_id < dev2->id.mac_id)
+ comp = -1;
+ else
+ comp = 0;
+ }
+
+ return comp;
}
static inline void
-dpaa_remove_from_device_list(struct rte_dpaa_device *dev)
+dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
{
- TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, dev, next);
+ int comp, inserted = 0;
+ struct rte_dpaa_device *dev = NULL;
+ struct rte_dpaa_device *tdev = NULL;
+
+ TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
+ comp = compare_dpaa_devices(newdev, dev);
+ if (comp < 0) {
+ TAILQ_INSERT_BEFORE(dev, newdev, next);
+ inserted = 1;
+ break;
+ }
+ }
+
+ if (!inserted)
+ TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
}
/*
}
}
-/** XXX move this function into a separate file */
-static int
-_dpaa_portal_init(void *arg)
+int rte_dpaa_portal_init(void *arg)
{
cpu_set_t cpuset;
pthread_t id;
BUS_INIT_FUNC_TRACE();
- if ((uint64_t)arg == 1 || cpu == LCORE_ID_ANY)
+ if ((size_t)arg == 1 || cpu == LCORE_ID_ANY)
cpu = rte_get_master_lcore();
/* if the core id is not supported */
else
return ret;
}
- RTE_PER_LCORE(_dpaa_io) = true;
+ RTE_PER_LCORE(dpaa_io) = true;
DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
return 0;
}
-/*
- * rte_dpaa_portal_init - Wrapper over _dpaa_portal_init with thread level check
- * XXX Complete this
- */
-int rte_dpaa_portal_init(void *arg)
-{
- if (unlikely(!RTE_PER_LCORE(_dpaa_io)))
- return _dpaa_portal_init(arg);
-
- return 0;
-}
-
int
rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
{
/* Affine above created portal with channel*/
u32 sdqcr;
struct qman_portal *qp;
+ int ret;
- if (unlikely(!RTE_PER_LCORE(_dpaa_io)))
- _dpaa_portal_init(arg);
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init(arg);
+ if (ret < 0) {
+ DPAA_BUS_LOG(ERR, "portal initialization failure");
+ return ret;
+ }
+ }
/* Initialise qman specific portals */
qp = fsl_qman_portal_create();
rte_free(dpaa_io_portal);
dpaa_io_portal = NULL;
- RTE_PER_LCORE(_dpaa_io) = false;
+ RTE_PER_LCORE(dpaa_io) = false;
}
#define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
ret = drv->probe(drv, dev);
if (ret)
DPAA_BUS_ERR("Unable to probe.\n");
+
break;
}
}
- rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);
+
+ /* Register DPAA mempool ops only if any DPAA device has
+ * been detected.
+ */
+ if (!TAILQ_EMPTY(&rte_dpaa_bus.device_list))
+ rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);
svr_file = fopen(DPAA_SOC_ID_FILE, "r");
if (svr_file) {
static enum rte_iova_mode
rte_dpaa_get_iommu_class(void)
{
+ if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
+ (access(DPAA_DEV_PATH2, F_OK) != 0)) {
+ return RTE_IOVA_DC;
+ }
return RTE_IOVA_PA;
}