* Re-implemented get_fdir_info and get_fdir_stat in private API.
+* **Updated NXP dpaa ethdev PMD.**
+
+ Updated the NXP dpaa ethdev with new features and improvements, including:
+
+ * Added support for using the datapath APIs from non-EAL pthreads (see the sketch below this list)
+
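(Illustrative sketch, not part of the patch: with this support a plain
pthread that was never registered as an EAL lcore can drive the burst
APIs, because the dpaa PMD affines a QMAN/BMAN portal to the calling
thread on first use, as the driver changes below show. Port 0/queue 0
and the function name here are assumptions for the example.)

    #include <pthread.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Runs in a plain pthread (not an EAL lcore): the dpaa PMD lazily
     * affines a portal to this thread on the first burst call. */
    static void *
    datapath_worker(void *arg __rte_unused)
    {
            struct rte_mbuf *pkts[32];
            uint16_t nb;

            for (;;) {
                    nb = rte_eth_rx_burst(0, 0, pkts, 32);
                    if (nb)
                            /* real code would free any unsent mbufs */
                            rte_eth_tx_burst(0, 0, pkts, nb);
            }
            return NULL;
    }

    /* From the main thread, after rte_eal_init() and port setup:
     *     pthread_t tid;
     *     pthread_create(&tid, NULL, datapath_worker, NULL);
     */
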
* **Updated NXP dpaa2 ethdev PMD.**

  Updated the NXP dpaa2 ethdev with new features and improvements, including:
#define FSL_DPAA_BUS_NAME dpaa_bus
-RTE_DEFINE_PER_LCORE(bool, dpaa_io);
-RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
+RTE_DEFINE_PER_LCORE(struct dpaa_portal *, dpaa_io);
struct fm_eth_port_cfg *
dpaa_get_eth_port_cfg(int dev_id)
{
unsigned int cpu, lcore = rte_lcore_id();
int ret;
- struct dpaa_portal *dpaa_io_portal;
BUS_INIT_FUNC_TRACE();
DPAA_BUS_LOG(DEBUG, "QMAN thread initialized - CPU=%d lcore=%d",
cpu, lcore);
- dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
+ DPAA_PER_LCORE_PORTAL = rte_malloc(NULL, sizeof(struct dpaa_portal),
RTE_CACHE_LINE_SIZE);
- if (!dpaa_io_portal) {
+ if (!DPAA_PER_LCORE_PORTAL) {
DPAA_BUS_LOG(ERR, "Unable to allocate memory");
bman_thread_finish();
qman_thread_finish();
return -ENOMEM;
}
- dpaa_io_portal->qman_idx = qman_get_portal_index();
- dpaa_io_portal->bman_idx = bman_get_portal_index();
- dpaa_io_portal->tid = syscall(SYS_gettid);
+ DPAA_PER_LCORE_PORTAL->qman_idx = qman_get_portal_index();
+ DPAA_PER_LCORE_PORTAL->bman_idx = bman_get_portal_index();
+ DPAA_PER_LCORE_PORTAL->tid = syscall(SYS_gettid);
- ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
+ ret = pthread_setspecific(dpaa_portal_key,
+ (void *)DPAA_PER_LCORE_PORTAL);
if (ret) {
DPAA_BUS_LOG(ERR, "pthread_setspecific failed on core %u"
" (lcore=%u) with ret: %d", cpu, lcore, ret);
return ret;
}
- RTE_PER_LCORE(dpaa_io) = true;
-
DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
return 0;
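The pthread_setspecific() call above is what ties portal teardown to
thread exit: the destructor registered against dpaa_portal_key runs
when the pthread terminates, so non-EAL threads release their portal
without an explicit finish call. A minimal sketch of that standard
pattern (illustrative names, not an excerpt from this patch):

    #include <pthread.h>

    static pthread_key_t portal_key;

    /* Called automatically, per thread, when a thread that stored a
     * value under the key exits. */
    static void portal_destructor(void *portal)
    {
            /* release the QMAN/BMAN portal held by this thread */
    }

    /* Once, e.g. at bus probe time:
     *     pthread_key_create(&portal_key, portal_destructor);
     * Per thread, as in the code above:
     *     pthread_setspecific(portal_key, portal);
     */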
u32 sdqcr;
int ret;
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init(arg);
if (ret < 0) {
DPAA_BUS_LOG(ERR, "portal initialization failure");
rte_free(dpaa_io_portal);
dpaa_io_portal = NULL;
-
- RTE_PER_LCORE(dpaa_io) = false;
+ DPAA_PER_LCORE_PORTAL = NULL;
}
static int
netcfg_acquire;
netcfg_release;
per_lcore_dpaa_io;
- per_lcore_held_bufs;
qman_alloc_cgrid_range;
qman_alloc_pool_range;
qman_clear_irq;
extern unsigned int dpaa_svr_family;
-extern RTE_DEFINE_PER_LCORE(bool, dpaa_io);
-
struct rte_dpaa_device;
struct rte_dpaa_driver;
rte_dpaa_remove_t remove;
};
+/* Create storage for dqrr entries per lcore */
+#define DPAA_PORTAL_DEQUEUE_DEPTH 16
+struct dpaa_portal_dqrr {
+ void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
+ uint64_t dqrr_held;
+ uint8_t dqrr_size;
+};
+
struct dpaa_portal {
uint32_t bman_idx; /**< BMAN Portal ID */
uint32_t qman_idx; /**< QMAN Portal ID */
+ struct dpaa_portal_dqrr dpaa_held_bufs;
+ struct rte_crypto_op **dpaa_sec_ops;
+ int dpaa_sec_op_nb;
uint64_t tid; /**< Parent thread id for this portal */
};
+RTE_DECLARE_PER_LCORE(struct dpaa_portal *, dpaa_io);
+
+#define DPAA_PER_LCORE_PORTAL \
+ RTE_PER_LCORE(dpaa_io)
+#define DPAA_PER_LCORE_DQRR_SIZE \
+ RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_size
+#define DPAA_PER_LCORE_DQRR_HELD \
+ RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.dqrr_held
+#define DPAA_PER_LCORE_DQRR_MBUF(i) \
+ RTE_PER_LCORE(dpaa_io)->dpaa_held_bufs.mbuf[i]
+#define DPAA_PER_LCORE_RTE_CRYPTO_OP \
+ RTE_PER_LCORE(dpaa_io)->dpaa_sec_ops
+#define DPAA_PER_LCORE_DPAA_SEC_OP_NB \
+ RTE_PER_LCORE(dpaa_io)->dpaa_sec_op_nb
+
/* Various structures representing contiguous memory maps */
struct dpaa_memseg {
TAILQ_ENTRY(dpaa_memseg) next;
} \
RTE_PMD_EXPORT_NAME(nm, __COUNTER__)
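With the DQRR state now hanging off the per-lcore portal, a dequeue
callback that defers an entry records it through the new accessors.
A sketch modeled on the dpaa Rx callbacks (not a verbatim excerpt;
DQRR_PTR2IDX is the existing qman helper that maps a DQRR entry to
its ring index):

    /* Inside a qman dequeue callback: park the entry instead of
     * consuming it, so the caller can release it later. */
    static enum qman_cb_dqrr_result
    hold_dqrr_entry(const struct qm_dqrr_entry *dqrr, struct rte_mbuf *mbuf)
    {
            u32 index = DQRR_PTR2IDX(dqrr);

            DPAA_PER_LCORE_DQRR_SIZE++;
            DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
            DPAA_PER_LCORE_DQRR_MBUF(index) = mbuf;
            return qman_cb_dqrr_defer;
    }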
-/* Create storage for dqrr entries per lcore */
-#define DPAA_PORTAL_DEQUEUE_DEPTH 16
-struct dpaa_portal_dqrr {
- void *mbuf[DPAA_PORTAL_DEQUEUE_DEPTH];
- uint64_t dqrr_held;
- uint8_t dqrr_size;
-};
-
-RTE_DECLARE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
-
-#define DPAA_PER_LCORE_DQRR_SIZE RTE_PER_LCORE(held_bufs).dqrr_size
-#define DPAA_PER_LCORE_DQRR_HELD RTE_PER_LCORE(held_bufs).dqrr_held
-#define DPAA_PER_LCORE_DQRR_MBUF(i) RTE_PER_LCORE(held_bufs).mbuf[i]
-
__rte_internal
struct fm_eth_port_cfg *dpaa_get_eth_port_cfg(int dev_id);
static uint8_t cryptodev_driver_id;
-static __thread struct rte_crypto_op **dpaa_sec_ops;
-static __thread int dpaa_sec_op_nb;
-
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
struct dpaa_sec_job *job;
struct dpaa_sec_op_ctx *ctx;
- if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
+ if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
return qman_cb_dqrr_defer;
if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
}
mbuf->data_len = len;
}
- dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
+ DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
dpaa_sec_op_ending(ctx);
return qman_cb_dqrr_consume;
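The callback above only appends to a per-lcore array; the dequeue side
wires that array to the caller's buffer before polling. A sketch
modeled on dpaa_sec's dequeue path (illustrative, not a verbatim
excerpt; `polled` is a local introduced for the example):

    /* Point the per-lcore op array at the caller's buffer, then poll;
     * the DQRR callback fills in completed ops as they arrive. */
    DPAA_PER_LCORE_DPAA_SEC_OP_NB = 0;
    DPAA_PER_LCORE_RTE_CRYPTO_OP = ops;

    while (polled < nb_ops)
            polled += qman_poll_dqrr(nb_ops - polled);
    return polled;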
DPAA_SEC_ERR("Unable to prepare sec cdb");
return ret;
}
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_SEC_ERR("Failure in affining portal");
}
}
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
retval = rte_dpaa_portal_init((void *)1);
if (retval) {
DPAA_SEC_ERR("Unable to initialize portal");
struct dpaa_port *portal = (struct dpaa_port *)port;
struct rte_mbuf *mbuf;
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
/* Affine current thread context to a qman portal */
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
struct dpaa_port *portal = (struct dpaa_port *)port;
struct rte_mbuf *mbuf;
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
/* Affine current thread context to a qman portal */
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
MEMPOOL_INIT_FUNC_TRACE();
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_MEMPOOL_ERR(
DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d",
n, bp_info->bpid);
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
return -1;
}
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d",
is_global_init = 1;
}
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)1);
if (ret) {
DPAA_PMD_ERR("Unable to initialize portal");
if (likely(fq->is_static))
return dpaa_eth_queue_portal_rx(fq, bufs, nb_bufs);
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_PMD_ERR("Failure in affining portal");
int ret, realloc_mbuf = 0;
uint32_t seqn, index, flags[DPAA_TX_BURST_SIZE] = {0};
- if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
ret = rte_dpaa_portal_init((void *)0);
if (ret) {
DPAA_PMD_ERR("Failure in affining portal");