mempool/octeontx: support freeing
authorSantosh Shukla <santosh.shukla@caviumnetworks.com>
Sun, 8 Oct 2017 12:40:06 +0000 (18:10 +0530)
committerThomas Monjalon <thomas@monjalon.net>
Sun, 8 Oct 2017 17:30:50 +0000 (19:30 +0200)
Upon a pool free request from the application (e.g. through
rte_mempool_free(), sketched below), the Octeon FPA free path
does the following:
- Uses mbox to reset the fpapf pool setup.
- Frees the fpavf resources.
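A minimal application-side sketch of how this path is reached
(illustrative only, not part of the patch; the helper name is
hypothetical and assumes a pool created with the "octeontx_fpavf" ops):

    #include <rte_mempool.h>

    /* rte_mempool_free() invokes the registered ops' .free callback,
     * which for "octeontx_fpavf" now calls
     * octeontx_fpa_bufpool_destroy() on the underlying FPA pool.
     * If buffers are still outstanding, the destroy path returns
     * -EBUSY and leaves the hardware pool untouched, so all mbufs
     * should be returned to the pool before freeing it.
     */
    static void
    app_teardown_pool(struct rte_mempool *mp)
    {
            if (mp != NULL)
                    rte_mempool_free(mp);
    }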

Signed-off-by: Santosh Shukla <santosh.shukla@caviumnetworks.com>
Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
drivers/mempool/octeontx/octeontx_fpavf.c
drivers/mempool/octeontx/octeontx_fpavf.h
drivers/mempool/octeontx/rte_mempool_octeontx.c

diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
index c0c9d83..3290aa0 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.c
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -581,6 +581,117 @@ error_end:
        return (uintptr_t)NULL;
 }
 
+/*
+ * Destroy a buffer pool.
+ */
+int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
+{
+       void **node, **curr, *head = NULL;
+       uint64_t sz;
+       uint64_t cnt, avail;
+       uint8_t gpool;
+       uintptr_t pool_bar;
+       int ret;
+
+       RTE_SET_USED(node_id);
+
+       /* Wait for all outstanding writes to be committed */
+       rte_smp_wmb();
+
+       if (unlikely(!octeontx_fpa_handle_valid(handle)))
+               return -EINVAL;
+
+       /* Get the gpool index from the handle */
+       gpool = octeontx_fpa_bufpool_gpool(handle);
+
+       /* Get pool bar address from handle */
+       pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+       /* Check for no outstanding buffers */
+       cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
+                                       FPA_VF_VHAURA_CNT(gpool)));
+       if (cnt) {
+               fpavf_log_dbg("buffers exist in pool, cnt %" PRIu64 "\n", cnt);
+               return -EBUSY;
+       }
+
+       rte_spinlock_lock(&fpadev.lock);
+
+       avail = fpavf_read64((void *)((uintptr_t)pool_bar +
+                               FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+       /* Prepare to empty the entire POOL */
+       fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
+                        FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+       fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
+                        FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+       /* Empty the pool */
+       /* Invalidate the POOL */
+       octeontx_gpool_free(gpool);
+
+       /* Process all buffers in the pool */
+       while (avail--) {
+
+               /* Yank a buffer from the pool */
+               node = (void *)(uintptr_t)
+                       fpavf_read64((void *)
+                                   (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));
+
+               if (node == NULL) {
+                       fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
+                                     gpool, avail);
+                       break;
+               }
+
+               /* Insert it into an ordered linked list */
+               for (curr = &head; curr[0] != NULL; curr = curr[0]) {
+                       if ((uintptr_t)node <= (uintptr_t)curr[0])
+                               break;
+               }
+               node[0] = curr[0];
+               curr[0] = node;
+       }
+
+       /* Verify that the freed buffers form a contiguous, ascending series */
+       sz = octeontx_fpa_bufpool_block_size(handle) << 7;
+       for (curr = head; curr != NULL && curr[0] != NULL;
+               curr = curr[0]) {
+               if (curr == curr[0] ||
+                       ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
+                       fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
+                                     gpool, curr, curr[0]);
+               }
+       }
+
+       /* Disable pool operation */
+       fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+                        FPA_VF_VHPOOL_START_ADDR(gpool)));
+       fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+                       FPA_VF_VHPOOL_END_ADDR(gpool)));
+
+       (void)octeontx_fpapf_pool_destroy(gpool);
+
+       /* Deactivate the AURA */
+       fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+                       FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+       fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+                       FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+       ret = octeontx_fpapf_aura_detach(gpool);
+       if (ret) {
+               fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
+                             gpool, ret);
+       }
+
+       /* Free VF */
+       (void)octeontx_fpavf_free(gpool);
+
+       rte_spinlock_unlock(&fpadev.lock);
+       return 0;
+}
+
 static void
 octeontx_fpavf_setup(void)
 {
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
index 4da91f1..b52224a 100644
--- a/drivers/mempool/octeontx/octeontx_fpavf.h
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -136,6 +136,8 @@ octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
                                unsigned int buf_offset, char **va_start,
                                int node);
 int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
+int
 octeontx_fpa_bufpool_block_size(uintptr_t handle);
 
 static __rte_always_inline uint8_t
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
index d930a81..6ac4b7d 100644
--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -74,10 +74,20 @@ _end:
        return rc;
 }
 
+static void
+octeontx_fpavf_free(struct rte_mempool *mp)
+{
+       uintptr_t pool;
+
+       pool = (uintptr_t)mp->pool_id;
+
+       octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
+}
+
 static struct rte_mempool_ops octeontx_fpavf_ops = {
        .name = "octeontx_fpavf",
        .alloc = octeontx_fpavf_alloc,
-       .free = NULL,
+       .free = octeontx_fpavf_free,
        .enqueue = NULL,
        .dequeue = NULL,
        .get_count = NULL,