git.droids-corp.org - dpdk.git/commitdiff
event/octeontx: add option to use fpavf as chunk pool
Author: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Mon, 9 Apr 2018 21:00:34 +0000 (02:30 +0530)
Committer: Thomas Monjalon <thomas@monjalon.net>
Mon, 16 Apr 2018 09:27:15 +0000 (11:27 +0200)
Add compile-time configurable option to force TIMvf to use Octeontx
FPAvf pool manager as its chunk pool.
When FPAvf is used as pool manager the TIMvf automatically frees the
chunks to FPAvf through gpool-id.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
drivers/event/octeontx/timvf_evdev.c
drivers/event/octeontx/timvf_evdev.h
drivers/event/octeontx/timvf_worker.c
drivers/event/octeontx/timvf_worker.h

index d6a8bb355ec54a05c82278ef6faa37f89e7f4da3..b20a2f1f5a1925c913b8199826d6b5a3f2c3ca63 100644 (file)
@@ -125,7 +125,9 @@ static int
 timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 {
        int ret;
+       uint8_t use_fpa = 0;
        uint64_t interval;
+       uintptr_t pool;
        struct timvf_ctrl_reg rctrl;
        struct timvf_mbox_dev_info dinfo;
        struct timvf_ring *timr = adptr->data->adapter_priv;
@@ -155,6 +157,9 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
                return -EINVAL;
        }
 
+       if (!strcmp(rte_mbuf_best_mempool_ops(), "octeontx_fpavf"))
+               use_fpa = 1;
+
        /*CTRL0 register.*/
        rctrl.rctrl0 = interval;
 
@@ -167,9 +172,24 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 
        rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;
 
+       if (use_fpa) {
+               pool = (uintptr_t)((struct rte_mempool *)
+                               timr->chunk_pool)->pool_id;
+               ret = octeontx_fpa_bufpool_gpool(pool);
+               if (ret < 0) {
+                       timvf_log_dbg("Unable to get gaura id");
+                       ret = -ENOMEM;
+                       goto error;
+               }
+               timvf_write64((uint64_t)ret,
+                               (uint8_t *)timr->vbar0 + TIM_VRING_AURA);
+       } else {
+               rctrl.rctrl1 |= 1ull << 43 /* ENA_DFB (Enable don't free) */;
+       }
+
        timvf_write64((uintptr_t)timr->bkt,
                        (uint8_t *)timr->vbar0 + TIM_VRING_BASE);
-       timvf_set_chunk_refill(timr);
+       timvf_set_chunk_refill(timr, use_fpa);
        if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
                ret = -EACCES;
                goto error;
index b3fc343af6e9dd08d935778a5670d5f99964b0df..b1b2a8464f8504d7e529240708812b6b2f6e898e 100644 (file)
@@ -25,6 +25,7 @@
 #include <rte_reciprocal.h>
 
 #include <octeontx_mbox.h>
+#include <octeontx_fpavf.h>
 
 #define timvf_log(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, otx_logtype_timvf, \
@@ -220,6 +221,6 @@ uint16_t timvf_timer_arm_tmo_brst_stats(
                const struct rte_event_timer_adapter *adptr,
                struct rte_event_timer **tim, const uint64_t timeout_tick,
                const uint16_t nb_timers);
-void timvf_set_chunk_refill(struct timvf_ring * const timr);
+void timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa);
 
 #endif /* __TIMVF_EVDEV_H__ */
index 02e17b6f5bc07e5a34afddf8f802cbefca8f0879..e681bc6b8cce756c240400815804a6e5f2213031 100644 (file)
@@ -191,7 +191,10 @@ timvf_timer_arm_tmo_brst_stats(const struct rte_event_timer_adapter *adptr,
 }
 
 void
-timvf_set_chunk_refill(struct timvf_ring * const timr)
+timvf_set_chunk_refill(struct timvf_ring * const timr, uint8_t use_fpa)
 {
-       timr->refill_chunk = timvf_refill_chunk_generic;
+       if (use_fpa)
+               timr->refill_chunk = timvf_refill_chunk_fpa;
+       else
+               timr->refill_chunk = timvf_refill_chunk_generic;
 }
index 93254cd39a9497e2f05b9b7d61da8836fb1d1b33..dede1a4a4fcad9e99d425f4470f3ccbaced3f845 100644 (file)
@@ -213,6 +213,28 @@ timvf_refill_chunk_generic(struct tim_mem_bucket * const bkt,
        return chunk;
 }
 
+static inline struct tim_mem_entry *
+timvf_refill_chunk_fpa(struct tim_mem_bucket * const bkt,
+               struct timvf_ring * const timr)
+{
+       struct tim_mem_entry *chunk;
+
+       if (unlikely(rte_mempool_get(timr->chunk_pool, (void **)&chunk)))
+               return NULL;
+
+       *(uint64_t *)(chunk + nb_chunk_slots) = 0;
+       if (bkt->nb_entry) {
+               *(uint64_t *)(((struct tim_mem_entry *)(uintptr_t)
+                               bkt->current_chunk) +
+                               nb_chunk_slots) =
+                       (uintptr_t) chunk;
+       } else {
+               bkt->first_chunk = (uintptr_t) chunk;
+       }
+
+       return chunk;
+}
+
 static inline struct tim_mem_bucket *
 timvf_get_target_bucket(struct timvf_ring * const timr, const uint32_t rel_bkt)
 {