X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Focteontx%2Ftimvf_evdev.c;h=b461209c2bd332ac963a9ac551aed7b961b01d69;hb=1b533790f44ee160bae86bdcc5307d7b52225726;hp=c66db437eb9d4a80527f6f56a4dbf1913b782e62;hpb=d1925c87d0ddadc689d76010f0fa71e060a13aff;p=dpdk.git

diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index c66db437eb..b461209c2b 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -82,11 +82,52 @@ timvf_get_start_cyc(uint64_t *now, uint8_t ring_id)
 	return octeontx_mbox_send(&hdr, NULL, 0, now, sizeof(uint64_t));
 }
 
+static int
+optimize_bucket_parameters(struct timvf_ring *timr)
+{
+	uint32_t hbkts;
+	uint32_t lbkts;
+	uint64_t tck_nsec;
+
+	hbkts = rte_align32pow2(timr->nb_bkts);
+	tck_nsec = RTE_ALIGN_MUL_CEIL(timr->max_tout / (hbkts - 1), 10);
+
+	if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
+		hbkts = 0;
+
+	lbkts = rte_align32prevpow2(timr->nb_bkts);
+	tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout / (lbkts - 1)), 10);
+
+	if ((tck_nsec < 1000 || hbkts > TIM_MAX_BUCKETS))
+		lbkts = 0;
+
+	if (!hbkts && !lbkts)
+		return 0;
+
+	if (!hbkts) {
+		timr->nb_bkts = lbkts;
+		goto end;
+	} else if (!lbkts) {
+		timr->nb_bkts = hbkts;
+		goto end;
+	}
+
+	timr->nb_bkts = (hbkts - timr->nb_bkts) <
+		(timr->nb_bkts - lbkts) ? hbkts : lbkts;
+end:
+	timr->get_target_bkt = bkt_and;
+	timr->tck_nsec = RTE_ALIGN_MUL_CEIL((timr->max_tout /
+				(timr->nb_bkts - 1)), 10);
+	return 1;
+}
+
 static int
 timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 {
 	int ret;
+	uint8_t use_fpa = 0;
 	uint64_t interval;
+	uintptr_t pool;
 	struct timvf_ctrl_reg rctrl;
 	struct timvf_mbox_dev_info dinfo;
 	struct timvf_ring *timr = adptr->data->adapter_priv;
@@ -116,6 +157,9 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 		return -EINVAL;
 	}
 
+	if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
+		use_fpa = 1;
+
 	/*CTRL0 register.*/
 	rctrl.rctrl0 = interval;
 
@@ -128,8 +172,24 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 
 	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;
 
+	if (use_fpa) {
+		pool = (uintptr_t)((struct rte_mempool *)
+				timr->chunk_pool)->pool_id;
+		ret = octeontx_fpa_bufpool_gpool(pool);
+		if (ret < 0) {
+			timvf_log_dbg("Unable to get gaura id");
+			ret = -ENOMEM;
+			goto error;
+		}
+		timvf_write64((uint64_t)ret,
+				(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
+	} else {
+		rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
+	}
+
 	timvf_write64((uintptr_t)timr->bkt,
 			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
+	timvf_set_chunk_refill(timr, use_fpa);
 	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
 		ret = -EACCES;
 		goto error;
@@ -179,6 +239,7 @@ timvf_ring_create(struct rte_event_timer_adapter *adptr)
 	struct timvf_ring *timr;
 	struct timvf_info tinfo;
 	const char *mempool_ops;
+	unsigned int mp_flags = 0;
 
 	if (timvf_info(&tinfo) < 0)
 		return -ENODEV;
@@ -202,7 +263,7 @@ timvf_ring_create(struct rte_event_timer_adapter *adptr)
 
 	timr->clk_src = (int) rcfg->clk_src;
 	timr->tim_ring_id = adptr->data->id;
-	timr->tck_nsec = rcfg->timer_tick_ns;
+	timr->tck_nsec = RTE_ALIGN_MUL_CEIL(rcfg->timer_tick_ns, 10);
 	timr->max_tout = rcfg->max_tmo_ns;
 	timr->nb_bkts = (timr->max_tout / timr->tck_nsec);
 	timr->vbar0 = timvf_bar(timr->tim_ring_id, 0);
@@ -212,16 +273,33 @@ timvf_ring_create(struct rte_event_timer_adapter *adptr)
 
 	timr->nb_chunks = nb_timers / nb_chunk_slots;
 
+	/* Try to optimize the bucket parameters. */
+	if ((rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES)
+			&& !rte_is_power_of_2(timr->nb_bkts)) {
+		if (optimize_bucket_parameters(timr)) {
+			timvf_log_info("Optimized configured values");
+			timvf_log_dbg("nb_bkts : %"PRIu32"", timr->nb_bkts);
+			timvf_log_dbg("tck_nsec : %"PRIu64"", timr->tck_nsec);
+		} else
+			timvf_log_info("Failed to Optimize configured values");
+	}
+
+	if (rcfg->flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT) {
+		mp_flags = MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET;
+		timvf_log_info("Using single producer mode");
+	}
+
 	timr->bkt = rte_zmalloc("octeontx_timvf_bucket",
 			(timr->nb_bkts) * sizeof(struct tim_mem_bucket),
 			0);
 	if (timr->bkt == NULL)
 		goto mem_err;
 
-	snprintf(pool_name, 30, "timvf_chunk_pool%d", timr->tim_ring_id);
+	snprintf(pool_name, sizeof(pool_name), "timvf_chunk_pool%d",
+			timr->tim_ring_id);
 	timr->chunk_pool = (void *)rte_mempool_create_empty(pool_name,
 			timr->nb_chunks, TIM_CHUNK_SIZE, 0, 0, rte_socket_id(),
-			0);
+			mp_flags);
 
 	if (!timr->chunk_pool) {
 		rte_free(timr->bkt);
@@ -305,14 +383,26 @@ timvf_timer_adapter_caps_get(const struct rte_eventdev *dev, uint64_t flags,
 		uint8_t enable_stats)
 {
 	RTE_SET_USED(dev);
-	RTE_SET_USED(flags);
 
 	if (enable_stats) {
 		timvf_ops.stats_get = timvf_stats_get;
 		timvf_ops.stats_reset = timvf_stats_reset;
 	}
+	if (flags & RTE_EVENT_TIMER_ADAPTER_F_SP_PUT)
+		timvf_ops.arm_burst = enable_stats ?
+			timvf_timer_arm_burst_sp_stats :
+			timvf_timer_arm_burst_sp;
+	else
+		timvf_ops.arm_burst = enable_stats ?
+			timvf_timer_arm_burst_mp_stats :
+			timvf_timer_arm_burst_mp;
+
+	timvf_ops.arm_tmo_tick_burst = enable_stats ?
+		timvf_timer_arm_tmo_brst_stats :
+		timvf_timer_arm_tmo_brst;
+
 	timvf_ops.cancel_burst = timvf_timer_cancel_burst;
 	*caps = RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT;
 	*ops = &timvf_ops;
-	return -EINVAL;
+	return 0;
 }
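
For reference, a minimal application-side sketch (not part of the patch) of a timer adapter configuration that would exercise the new RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES resolution-rounding path and the RTE_EVENT_TIMER_ADAPTER_F_SP_PUT single-producer arm/mempool path; the helper name, event device id, tick, timeout and timer-count values are illustrative assumptions, and the event device is assumed to be configured already:

#include <rte_event_timer_adapter.h>
#include <rte_lcore.h>

/* Hypothetical helper: build a conf that maps onto the paths added above. */
static struct rte_event_timer_adapter *
setup_timvf_adapter(uint8_t evdev_id)
{
	const struct rte_event_timer_adapter_conf conf = {
		.event_dev_id = evdev_id,
		.timer_adapter_id = 0,
		.socket_id = rte_socket_id(),
		.clk_src = RTE_EVENT_TIMER_ADAPTER_CPU_CLK,
		.timer_tick_ns = 10 * 1000 * 1000,		/* 10 ms tick (illustrative) */
		.max_tmo_ns = 10ULL * 1000 * 1000 * 1000,	/* 10 s max timeout */
		.nb_timers = 64 * 1024,
		/* ADJUST_RES lets the driver round nb_bkts/tck_nsec via
		 * optimize_bucket_parameters(); SP_PUT selects the
		 * single-producer arm variants and mempool flags.
		 */
		.flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES |
			RTE_EVENT_TIMER_ADAPTER_F_SP_PUT,
	};

	/* Uses the default conf callback; start the adapter afterwards with
	 * rte_event_timer_adapter_start() before arming timers.
	 */
	return rte_event_timer_adapter_create(&conf);
}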