net/bnxt: add dpool allocator for EM allocation
author    Peter Spreadborough <peter.spreadborough@broadcom.com>
Sun, 30 May 2021 08:58:46 +0000 (14:28 +0530)
committer Ajit Khaparde <ajit.khaparde@broadcom.com>
Thu, 8 Jul 2021 00:01:50 +0000 (02:01 +0200)
The dpool allocator supports variable size entries and
also supports defragmentation of the allocation space.
EM uses the fixed size stack allocator by default; the dynamic
dpool allocator may be selected at build time via TF_EM_ALLOC.
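
A minimal usage sketch of the new API (illustrative only; start,
num_entries, ctx, move_cb and flow_handle are placeholders, and the
max_alloc_size of 7 matches the limit imposed by DP_FLAGS_SIZE_MASK):

    struct dpool pool;
    uint32_t idx;

    /* dpool_init() allocates the entry array internally. */
    if (dpool_init(&pool, start, num_entries, 7, ctx, move_cb))
            return -1;

    /* Variable size allocation; defragment the pool to fit if needed. */
    idx = dpool_alloc(&pool, 4, DP_DEFRAG_TO_FIT);
    if (idx == DP_INVALID_INDEX)
            return -1;

    /* Record the flow handle so move_cb() can identify the entry. */
    dpool_set_entry_data(&pool, idx, flow_handle);

    dpool_free(&pool, idx);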

Signed-off-by: Peter Spreadborough <peter.spreadborough@broadcom.com>
Signed-off-by: Randy Schacher <stuart.schacher@broadcom.com>
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Farah Smith <farah.smith@broadcom.com>
Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
12 files changed:
drivers/net/bnxt/tf_core/dpool.c [new file with mode: 0644]
drivers/net/bnxt/tf_core/dpool.h [new file with mode: 0644]
drivers/net/bnxt/tf_core/meson.build
drivers/net/bnxt/tf_core/tf_core.h
drivers/net/bnxt/tf_core/tf_device.h
drivers/net/bnxt/tf_core/tf_device_p58.c
drivers/net/bnxt/tf_core/tf_em.h
drivers/net/bnxt/tf_core/tf_em_hash_internal.c
drivers/net/bnxt/tf_core/tf_em_internal.c
drivers/net/bnxt/tf_core/tf_msg.c
drivers/net/bnxt/tf_core/tf_msg.h
drivers/net/bnxt/tf_core/tf_session.h

diff --git a/drivers/net/bnxt/tf_core/dpool.c b/drivers/net/bnxt/tf_core/dpool.c
new file mode 100644 (file)
index 0000000..0dae42b
--- /dev/null
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <errno.h>
+
+#include <rte_malloc.h>
+
+#include "tfp.h"
+#include "dpool.h"
+
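+/*
+ * Initialize the pool. The entry array is allocated here via
+ * tfp_calloc(); all entries start out free and record their FW
+ * index (start_index + offset).
+ */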
+int dpool_init(struct dpool *dpool,
+              uint32_t start_index,
+              uint32_t size,
+              uint8_t max_alloc_size,
+              void *user_data,
+              int (*move_callback)(void *, uint64_t, uint32_t))
+{
+       uint32_t i;
+       int rc;
+       struct tfp_calloc_parms parms;
+
+       parms.nitems = size;
+       parms.size = sizeof(struct dpool_entry);
+       parms.alignment = 0;
+
+       rc = tfp_calloc(&parms);
+
+       if (rc)
+               return rc;
+
+       dpool->entry = parms.mem_va;
+       dpool->start_index = start_index;
+       dpool->size = size;
+       dpool->max_alloc_size = max_alloc_size;
+       dpool->user_data = user_data;
+       dpool->move_callback = move_callback;
+       /*
+        * Init entries
+        */
+       for (i = 0; i < size; i++) {
+               dpool->entry[i].flags = 0;
+               dpool->entry[i].index = start_index;
+               dpool->entry[i].entry_data = 0UL;
+               start_index++;
+       }
+
+       return 0;
+}
+
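+/*
+ * Move the used block starting at src_index to dst_index. The
+ * destination entry must be free. Flags and entry_data are copied,
+ * the registered move_callback (if any) is notified of the new FW
+ * index and the source entries are marked free. Returns 0 on
+ * success or -1 if the destination is not free.
+ */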
+static int dpool_move(struct dpool *dpool,
+                     uint32_t dst_index,
+                     uint32_t src_index)
+{
+       uint32_t size;
+       uint32_t i;
+       if (DP_IS_FREE(dpool->entry[dst_index].flags)) {
+               size = DP_FLAGS_SIZE(dpool->entry[src_index].flags);
+
+               dpool->entry[dst_index].flags = dpool->entry[src_index].flags;
+               dpool->entry[dst_index].entry_data = dpool->entry[src_index].entry_data;
+
+               if (dpool->move_callback != NULL) {
+                       dpool->move_callback(dpool->user_data,
+                                            dpool->entry[src_index].entry_data,
+                                            dst_index + dpool->start_index);
+               }
+
+               dpool->entry[src_index].flags = 0;
+               dpool->entry[src_index].entry_data = 0UL;
+
+               for (i = 1; i < size; i++) {
+                       dpool->entry[dst_index + i].flags = size;
+                       dpool->entry[src_index + i].flags = 0;
+               }
+       } else {
+               return -1;
+       }
+
+       return 0;
+}
+
+
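+/*
+ * Defragment the pool by repeatedly building a list of free runs
+ * and a list of used blocks adjacent to free space, then moving the
+ * best candidate block into the largest free run. With
+ * DP_DEFRAG_TO_FIT the loop stops as soon as a free run of at least
+ * entry_size entries exists. Returns the size of the largest free
+ * run, or a negative value on failure.
+ */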
+int dpool_defrag(struct dpool *dpool,
+                uint32_t entry_size,
+                uint8_t defrag)
+{
+       struct dpool_free_list *free_list;
+       struct dpool_adj_list *adj_list;
+       uint32_t count;
+       uint32_t index;
+       uint32_t used;
+       uint32_t i;
+       uint32_t size;
+       uint32_t largest_free_index = 0;
+       uint32_t largest_free_size;
+       uint32_t max;
+       uint32_t max_index;
+       uint32_t max_size = 0;
+       int rc;
+
+       free_list = rte_zmalloc("dpool_free_list",
+                               sizeof(struct dpool_free_list), 0);
+       if (free_list == NULL) {
+               TFP_DRV_LOG(ERR, "dpool free list allocation failed\n");
+               return -ENOMEM;
+       }
+
+       adj_list = rte_zmalloc("dpool_adjacent_list",
+                               sizeof(struct dpool_adj_list), 0);
+       if (adj_list == NULL) {
+               TFP_DRV_LOG(ERR, "dpool adjacent list allocation failed\n");
+               rte_free(free_list);
+               return -ENOMEM;
+       }
+
+       while (1) {
+               /*
+                * Create list of free entries
+                */
+               free_list->size = 0;
+               largest_free_size = 0;
+               largest_free_index = 0;
+               count = 0;
+
+               for (i = 0; i < dpool->size; i++) {
+                       if (DP_IS_FREE(dpool->entry[i].flags)) {
+                               if (count == 0)
+                                       index = i;
+                               count++;
+                       } else if (count > 0) {
+                               free_list->entry[free_list->size].index = index;
+                               free_list->entry[free_list->size].size = count;
+
+                               if (count > largest_free_size) {
+                                       largest_free_index = free_list->size;
+                                       largest_free_size = count;
+                               }
+
+                               free_list->size++;
+                               count = 0;
+                       }
+               }
+
+               if (free_list->size == 0)
+                       largest_free_size = count;
+
+               /*
+                * If using defrag to fit and there's a large enough
+                * space then we are done.
+                */
+               if (defrag == DP_DEFRAG_TO_FIT &&
+                   largest_free_size >= entry_size)
+                       goto done;
+
+               /*
+                * Create list of entries adjacent to free entries
+                */
+               count = 0;
+               adj_list->size = 0;
+               used = 0;
+
+               for (i = 0; i < dpool->size; ) {
+                       if (DP_IS_USED(dpool->entry[i].flags)) {
+                               used++;
+
+                               if (count > 0) {
+                                       adj_list->entry[adj_list->size].index = i;
+                                       adj_list->entry[adj_list->size].size =
+                                               DP_FLAGS_SIZE(dpool->entry[i].flags);
+                                       adj_list->entry[adj_list->size].left = count;
+
+                                       if (adj_list->size > 0 && used == 1)
+                                               adj_list->entry[adj_list->size - 1].right = count;
+
+                                       adj_list->size++;
+                               }
+
+                               count = 0;
+                               i += DP_FLAGS_SIZE(dpool->entry[i].flags);
+                       } else {
+                               used = 0;
+                               count++;
+                               i++;
+                       }
+               }
+
+               /*
+                * Using the size of the largest free space available
+                * select the adjacency list entry of that size with
+                * the largest left + right + size count. If there
+                * are no entries of that size then decrement the size
+                * and try again.
+                */
+               max = 0;
+               max_index = 0;
+               max_size = 0;
+
+               for (size = largest_free_size; size > 0; size--) {
+                       for (i = 0; i < adj_list->size; i++) {
+                               if (adj_list->entry[i].size == size &&
+                                   ((size +
+                                     adj_list->entry[i].left +
+                                     adj_list->entry[i].right) > max)) {
+                                       max = size +
+                                               adj_list->entry[i].left +
+                                               adj_list->entry[i].right;
+                                       max_size = size;
+                                       max_index = adj_list->entry[i].index;
+                               }
+                       }
+
+                       if (max)
+                               break;
+               }
+
+               /*
+                * If the max entry is smaller than the largest_free_size
+                * find the first entry in the free list that it can fit into.
+                */
+               if (max_size < largest_free_size) {
+                       for (i = 0; i < free_list->size; i++) {
+                               if (free_list->entry[i].size >= max_size) {
+                                       largest_free_index = i;
+                                       break;
+                               }
+                       }
+               }
+
+               /*
+                * If we have a contender then move it to the new spot.
+                */
+               if (max) {
+                       rc = dpool_move(dpool,
+                                       free_list->entry[largest_free_index].index,
+                                       max_index);
+                       if (rc) {
+                               rte_free(free_list);
+                               rte_free(adj_list);
+                               return rc;
+                       }
+               } else {
+                       break;
+               }
+       }
+
+done:
+       rte_free(free_list);
+       rte_free(adj_list);
+       return largest_free_size;
+}
+
+
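+/*
+ * First-fit scan for <size> consecutive free entries. Every entry
+ * of the allocated block records the block size and the first entry
+ * is tagged with DP_FLAGS_START. If no run is found and a defrag
+ * mode is requested (which requires a move callback), the pool is
+ * defragmented and the scan retried while enough space remains.
+ * Returns the FW index or DP_INVALID_INDEX.
+ */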
+uint32_t dpool_alloc(struct dpool *dpool,
+                    uint32_t size,
+                    uint8_t defrag)
+{
+       uint32_t i;
+       uint32_t j;
+       uint32_t count = 0;
+       uint32_t first_entry_index;
+       int rc;
+
+       if (size > dpool->max_alloc_size || size == 0)
+               return DP_INVALID_INDEX;
+
+       /*
+        * Defrag requires EM move support.
+        */
+       if (defrag != DP_DEFRAG_NONE &&
+           dpool->move_callback == NULL)
+               return DP_INVALID_INDEX;
+
+       while (1) {
+               /*
+                * Find <size> consecutive free entries. Reset the
+                * running count in case a previous scan left it
+                * non-zero.
+                */
+               count = 0;
+               for (i = 0; i < dpool->size; i++) {
+                       if (DP_IS_FREE(dpool->entry[i].flags)) {
+                               if (count == 0)
+                                       first_entry_index = i;
+
+                               count++;
+
+                               if (count == size) {
+                                       for (j = 0; j < size; j++) {
+                                               dpool->entry[j + first_entry_index].flags = size;
+                                               if (j == 0)
+                                                       dpool->entry[j + first_entry_index].flags |=
+                                                               DP_FLAGS_START;
+                                       }
+
+                                       dpool->entry[first_entry_index].entry_data = 0UL;
+                                       return (first_entry_index + dpool->start_index);
+                               }
+                       } else {
+                               count = 0;
+                       }
+               }
+
+               /*
+                * If a defrag mode was requested then defragment the pool
+                */
+               if (defrag != DP_DEFRAG_NONE) {
+                       rc = dpool_defrag(dpool, size, defrag);
+
+                       if (rc < 0)
+                               return DP_INVALID_INDEX;
+               } else {
+                       break;
+               }
+
+               /*
+                * If the defrag created enough space then try the
+                * alloc again else quit.
+                */
+               if ((uint32_t)rc < size)
+                       break;
+       }
+
+       return DP_INVALID_INDEX;
+}
+
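+/*
+ * Free the block whose first entry maps to the given FW index. Only
+ * an index returned by dpool_alloc() (i.e. one whose entry carries
+ * DP_FLAGS_START) is accepted; otherwise -1 is returned.
+ */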
+int dpool_free(struct dpool *dpool,
+              uint32_t index)
+{
+       uint32_t i;
+       int start = (index - dpool->start_index);
+       uint32_t size;
+
+       if (start < 0)
+               return -1;
+
+       if (DP_IS_START(dpool->entry[start].flags)) {
+               size = DP_FLAGS_SIZE(dpool->entry[start].flags);
+               if (size > dpool->max_alloc_size || size == 0)
+                       return -1;
+
+               for (i = start; i < (start + size); i++)
+                       dpool->entry[i].flags = 0;
+
+               return 0;
+       }
+
+       return -1;
+}
+
+void dpool_free_all(struct dpool *dpool)
+{
+       uint32_t i;
+
+       for (i = 0; i < dpool->size; i++)
+               dpool_free(dpool, dpool->entry[i].index);
+}
+
+int dpool_set_entry_data(struct dpool *dpool,
+                        uint32_t index,
+                        uint64_t entry_data)
+{
+       int start = (index - dpool->start_index);
+
+       if (start < 0)
+               return -1;
+
+       if (DP_IS_START(dpool->entry[start].flags)) {
+               dpool->entry[start].entry_data = entry_data;
+               return 0;
+       }
+
+       return -1;
+}
diff --git a/drivers/net/bnxt/tf_core/dpool.h b/drivers/net/bnxt/tf_core/dpool.h
new file mode 100644 (file)
index 0000000..8b0a418
--- /dev/null
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019-2021 Broadcom
+ * All rights reserved.
+ */
+
+#ifndef _DPOOL_H_
+#define _DPOOL_H_
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#define DP_MAX_FREE_SIZE 0x8000 /* 32K */
+
+#define DP_INVALID_INDEX 0xffffffff
+
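+/*
+ * Entry flags layout: bits 0-2 hold the size of the allocated block
+ * (0 means the entry is free) and bit 31 marks the first entry of a
+ * block.
+ */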
+#define DP_FLAGS_START   0x80000000
+#define DP_IS_START(flags) ((flags) & DP_FLAGS_START)
+
+#define DP_FLAGS_SIZE_SHIFT 0
+#define DP_FLAGS_SIZE_MASK  0x07
+
+#define DP_FLAGS_SIZE(flags) (((flags) >> DP_FLAGS_SIZE_SHIFT) & DP_FLAGS_SIZE_MASK)
+
+#define DP_IS_FREE(flags) (((flags) & DP_FLAGS_SIZE_MASK) == 0)
+#define DP_IS_USED(flags) ((flags) & DP_FLAGS_SIZE_MASK)
+
+#define DP_DEFRAG_NONE   0x0
+#define DP_DEFRAG_ALL    0x1
+#define DP_DEFRAG_TO_FIT 0x2
+
+/**
+ * Free list entry
+ *
+ * Each entry includes an index into the dpool entry array
+ * and the size of the free run starting at that index.
+ */
+struct dpool_free_list_entry {
+       /*
+        * Index into the dpool entry array
+        */
+       uint32_t index;
+       /*
+        * The size of the entry in the dpool entry array
+        */
+       uint32_t size;
+};
+
+/**
+ * Free list
+ *
+ * Used internally to record free entries in the dpool entry array.
+ * Each entry represents a single or multiple contiguous entries
+ * in the dpool entry array.
+ *
+ * Used only during the defrag operation.
+ */
+struct dpool_free_list {
+       /*
+        * Number of entries in the free list
+        */
+       uint32_t size;
+       /*
+        * List of unused entries in the dpool entry array
+        */
+       struct dpool_free_list_entry entry[DP_MAX_FREE_SIZE];
+};
+
+/**
+ * Adjacent list entry
+ *
+ * Each entry includes an index into the dpool entry array,
+ * the size of the entry and the counts of free entries to the
+ * left and right of that entry.
+ */
+struct dpool_adj_list_entry {
+       /*
+        * Index into the dpool entry array
+        */
+       uint32_t index;
+       /*
+        * The size of the entry in the dpool entry array
+        */
+       uint32_t size;
+       /*
+        * Number of free entries directly to the  left of
+        * this entry
+        */
+       uint32_t left;
+       /*
+        * Number of free entries directly to the right of
+        * this entry
+        */
+       uint32_t right;
+};
+
+/**
+ * Adjacent list
+ *
+ * A list of references to entries in the dpool entry array that
+ * have free entries to the left and right. Since we pack to the
+ * left, entries will always have a non-zero left count.
+ *
+ * Used only during the defrag operation.
+ */
+struct dpool_adj_list {
+       /*
+        * Number of entries in the adj list
+        */
+       uint32_t size;
+       /*
+        * List of entries in the dpool entry array that have
+        * free entries directly to their left and right.
+        */
+       struct dpool_adj_list_entry entry[DP_MAX_FREE_SIZE];
+};
+
+/**
+ * Dpool entry
+ *
+ * Each entry includes flags, the FW index and the entry data.
+ */
+struct dpool_entry {
+       uint32_t flags;
+       uint32_t index;
+       uint64_t entry_data;
+};
+
+/**
+ * Dpool
+ *
+ * Used to manage a resource pool. Includes the start FW index, the
+ * size of the entry array and the entry array itself.
+ */
+struct dpool {
+       uint32_t start_index;
+       uint32_t size;
+       uint8_t  max_alloc_size;
+       void *user_data;
+       int (*move_callback)(void *user_data,
+                            uint64_t entry_data,
+                            uint32_t new_index);
+       struct dpool_entry *entry;
+};
+
+/**
+ * dpool_init
+ *
+ * Initialize the dpool
+ *
+ * [in] dpool
+ *      Pointer to a dpool structure. The caller is responsible for
+ *      allocating the dpool struct itself; the entry array is
+ *      allocated by dpool_init via tfp_calloc.
+ *
+ * [in] start_index
+ *      The base index to use.
+ *
+ * [in] size
+ *      The number of entries
+ *
+ * [in] max_alloc_size
+ *      The maximum number of entries allowed in a single allocation
+ *
+ * [in] user_data
+ *      Pointer to user data. Will be passed in callbacks.
+ *
+ * [in] move_callback
+ *      Pointer to move EM entry callback.
+ *
+ * Return
+ *      -  0 on success
+ *      - -1 on failure
+ *
+ */
+int dpool_init(struct dpool *dpool,
+              uint32_t start_index,
+              uint32_t size,
+              uint8_t max_alloc_size,
+              void *user_data,
+              int (*move_callback)(void *, uint64_t, uint32_t));
+
+/**
+ * dpool_alloc
+ *
+ * Request a FW index of the given size, de-fragmenting the dpool
+ * array if necessary.
+ *
+ * [in] dpool
+ *     The dpool
+ *
+ * [in] size
+ *     The size of the requested allocation.
+ *
+ * [in] defrag
+ *     Operation to apply when there is insufficient space:
+ *
+ *     DP_DEFRAG_NONE   (0x0) - Don't do anything.
+ *     DP_DEFRAG_ALL    (0x1) - Defrag until there is nothing left
+ *                              to defrag.
+ *     DP_DEFRAG_TO_FIT (0x2) - Defrag until there is just enough space
+ *                              to insert the requested allocation.
+ *
+ * Return
+ *      - FW index on success
+ *      - DP_INVALID_INDEX on failure
+ *
+ */
+uint32_t dpool_alloc(struct dpool *dpool,
+                    uint32_t size,
+                    uint8_t defrag);
+
+/**
+ * dpool_set_entry_data
+ *
+ * Set the entry data field. This will be passed to callbacks.
+ *
+ * [in] dpool
+ *     The dpool
+ *
+ * [in] index
+ *     FW index
+ *
+ * [in] entry_data
+ *     Entry data value
+ *
+ * Return
+ *      - 0 on success
+ *      - -1 on failure
+ *
+ */
+int dpool_set_entry_data(struct dpool *dpool,
+                        uint32_t index,
+                        uint64_t entry_data);
+
+/**
+ * dpool_free
+ *
+ * Free an allocated entry. The caller is responsible for the dpool
+ * and dpool entry array memory.
+ *
+ * [in] dpool
+ *      The pool
+ *
+ * [in] index
+ *      FW index to free up.
+ *
+ * Return
+ *      - 0  on success
+ *      - -1 on failure
+ *
+ */
+int dpool_free(struct dpool *dpool,
+              uint32_t index);
+
+/**
+ * dpool_free_all
+ *
+ * Free all entries.
+ *
+ * [in] dpool
+ *      The pool
+ *
+ * Return
+ *      none
+ *
+ */
+void dpool_free_all(struct dpool *dpool);
+
+/**
+ * dpool_dump
+ *
+ * Debug/util function to dump the dpool array.
+ *
+ * [in] dpool
+ *      The pool
+ *
+ */
+void dpool_dump(struct dpool *dpool);
+
+/**
+ * dpool_defrag
+ *
+ * De-fragment the dpool array and apply the specified defrag strategy.
+ *
+ * [in] dpool
+ *      The dpool
+ *
+ * [in] entry_size
+ *      If using the DP_DEFRAG_TO_FIT strategy, defrag will stop once
+ *      at least entry_size contiguous space is available.
+ *
+ * [in] defrag
+ *     Defrag strategy:
+ *
+ *     DP_DEFRAG_ALL    (0x1) - Defrag until there is nothing left
+ *                              to defrag.
+ *     DP_DEFRAG_TO_FIT (0x2) - Defrag until there is just enough space
+ *                              to insert the requested allocation.
+ *
+ * Return
+ *      < 0 - on failure
+ *      >= 0 - The size of the largest free space
+ */
+int dpool_defrag(struct dpool *dpool,
+                uint32_t entry_size,
+                uint8_t defrag);
+
+#endif /* _DPOOL_H_ */
index 2c02214..3a91f04 100644 (file)
@@ -10,6 +10,7 @@ sources += files(
         'tf_core.c',
         'bitalloc.c',
         'tf_msg.c',
+       'dpool.c',
         'rand.c',
         'stack.c',
         'tf_em_common.c',
index 4440d60..08a0830 100644 (file)
@@ -1953,6 +1953,48 @@ struct tf_delete_em_entry_parms {
         */
        uint64_t flow_handle;
 };
+/**
+ * tf_move_em_entry parameter definition
+ */
+struct tf_move_em_entry_parms {
+       /**
+        * [in] receive or transmit direction
+        */
+       enum tf_dir dir;
+       /**
+        * [in] internal or external
+        */
+       enum tf_mem mem;
+       /**
+        * [in] ID of table scope to use (external only)
+        */
+       uint32_t tbl_scope_id;
+       /**
+        * [in] ID of table interface to use (SR2 only)
+        */
+       uint32_t tbl_if_id;
+       /**
+        * [in] epoch group IDs of the entry to move.
+        * 2 element array. (SR2 only)
+        */
+       uint16_t *epochs;
+       /**
+        * [out] The index of the entry
+        */
+       uint16_t index;
+       /**
+        * [in] External memory channel type to use
+        */
+       enum tf_ext_mem_chan_type chan_type;
+       /**
+        * [in] The index of the new EM record
+        */
+       uint32_t new_index;
+       /**
+        * [in] flow handle of the entry to move
+        */
+       uint64_t flow_handle;
+};
 /**
  * tf_search_em_entry parameter definition
  */
index 16c2fe0..31806bb 100644 (file)
@@ -611,6 +611,22 @@ struct tf_dev_ops {
        int (*tf_dev_delete_int_em_entry)(struct tf *tfp,
                                          struct tf_delete_em_entry_parms *parms);
 
+       /**
+        * Move EM hash entry API
+        *
+        * [in] tfp
+        *   Pointer to TF handle
+        *
+        * [in] parms
+        *   Pointer to E/EM move parameters
+        *
+        *    returns:
+        *    0       - Success
+        *    -EINVAL - Error
+        */
+       int (*tf_dev_move_int_em_entry)(struct tf *tfp,
+                                       struct tf_move_em_entry_parms *parms);
+
        /**
         * Insert EEM hash entry API
         *
@@ -661,6 +677,24 @@ struct tf_dev_ops {
        int (*tf_dev_get_em_resc_info)(struct tf *tfp,
                                       struct tf_em_resource_info *parms);
 
+       /**
+        * Move EEM hash entry API
+        *
+        * [in] tfp
+        *   Pointer to TF handle
+        *
+        * [in] parms
+        *   Pointer to E/EM move parameters
+        *
+        *    returns:
+        *    0       - Success
+        *    -EINVAL - Error
+        */
+       int (*tf_dev_move_ext_em_entry)(struct tf *tfp,
+                                       struct tf_move_em_entry_parms *parms);
+
        /**
         * Allocate EEM table scope
         *
index 32d621e..50facce 100644 (file)
@@ -292,6 +292,11 @@ const struct tf_dev_ops tf_dev_ops_p58 = {
        .tf_dev_get_tcam_resc_info = tf_tcam_get_resc_info,
        .tf_dev_insert_int_em_entry = tf_em_hash_insert_int_entry,
        .tf_dev_delete_int_em_entry = tf_em_hash_delete_int_entry,
+#if (TF_EM_ALLOC == 1)
+       .tf_dev_move_int_em_entry = tf_em_move_int_entry,
+#else
+       .tf_dev_move_int_em_entry = NULL,
+#endif
        .tf_dev_insert_ext_em_entry = NULL,
        .tf_dev_delete_ext_em_entry = NULL,
        .tf_dev_get_em_resc_info = tf_em_get_resc_info,
index 60d90e2..9d168c3 100644 (file)
 
 #include "hcapi_cfa_defs.h"
 
+/**
+ * TF_EM_ALLOC
+ *
+ * 0: Use stack allocator with fixed sized entries
+ *    (default).
+ * 1: Use dpool allocator with variable size
+ *    entries.
+ */
+#define TF_EM_ALLOC 0
+
 #define TF_EM_MIN_ENTRIES     (1 << 15) /* 32K */
 #define TF_EM_MAX_ENTRIES     (1 << 27) /* 128M */
 
@@ -243,6 +253,22 @@ int tf_em_hash_insert_int_entry(struct tf *tfp,
 int tf_em_hash_delete_int_entry(struct tf *tfp,
                                struct tf_delete_em_entry_parms *parms);
 
+/**
+ * Move record from internal EM table
+ *
+ * [in] tfp
+ *   Pointer to TruFlow handle
+ *
+ * [in] parms
+ *   Pointer to input parameters
+ *
+ * Returns:
+ *   0       - Success
+ *   -EINVAL - Parameter error
+ */
+int tf_em_move_int_entry(struct tf *tfp,
+                        struct tf_move_em_entry_parms *parms);
+
 /**
  * Insert record in to external EEM table
  *
index f6c9772..098e8af 100644 (file)
@@ -22,7 +22,9 @@
 /**
  * EM Pool
  */
-extern struct stack em_pool[TF_DIR_MAX];
+#if (TF_EM_ALLOC == 1)
+#include "dpool.h"
+#endif
 
 /**
  * Insert EM internal entry API
@@ -39,7 +41,11 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
        uint16_t rptr_index = 0;
        uint8_t rptr_entry = 0;
        uint8_t num_of_entries = 0;
-       struct stack *pool = &em_pool[parms->dir];
+#if (TF_EM_ALLOC == 1)
+       struct dpool *pool;
+#else
+       struct stack *pool;
+#endif
        uint32_t index;
        uint32_t key0_hash;
        uint32_t key1_hash;
@@ -56,7 +62,20 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
        rc = tf_session_get_device(tfs, &dev);
        if (rc)
                return rc;
+#if (TF_EM_ALLOC == 1)
+       pool = (struct dpool *)tfs->em_pool[parms->dir];
+       index = dpool_alloc(pool,
+                           parms->em_record_sz_in_bits / 128,
+                           DP_DEFRAG_TO_FIT);
 
+       if (index == DP_INVALID_INDEX) {
+               PMD_DRV_LOG(ERR,
+                           "%s, EM entry index allocation failed\n",
+                           tf_dir_2_str(parms->dir));
+               return -1;
+       }
+#else
+       pool = (struct stack *)tfs->em_pool[parms->dir];
        rc = stack_pop(pool, &index);
        if (rc) {
                PMD_DRV_LOG(ERR,
@@ -64,6 +83,7 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
                            tf_dir_2_str(parms->dir));
                return rc;
        }
+#endif
 
        if (dev->ops->tf_dev_cfa_key_hash == NULL)
                return -EINVAL;
@@ -83,19 +103,14 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
                                                  &num_of_entries);
        if (rc) {
                /* Free the allocated index before returning */
+#if (TF_EM_ALLOC == 1)
+               dpool_free(pool, index);
+#else
                stack_push(pool, index);
+#endif
                return -1;
        }
 
-       PMD_DRV_LOG
-                 (DEBUG,
-                  "%s, Internal entry @ Index:%d rptr_index:0x%x rptr_entry:0x%x num_of_entries:%d\n",
-                  tf_dir_2_str(parms->dir),
-                  index,
-                  rptr_index,
-                  rptr_entry,
-                  num_of_entries);
-
        TF_SET_GFID(gfid,
                    ((rptr_index << TF_EM_INTERNAL_INDEX_SHIFT) |
                     rptr_entry),
@@ -113,6 +128,9 @@ tf_em_hash_insert_int_entry(struct tf *tfp,
                                     rptr_index,
                                     rptr_entry,
                                     0);
+#if (TF_EM_ALLOC == 1)
+       dpool_set_entry_data(pool, index, parms->flow_handle);
+#endif
        return 0;
 }
 
@@ -127,13 +145,71 @@ tf_em_hash_delete_int_entry(struct tf *tfp,
                            struct tf_delete_em_entry_parms *parms)
 {
        int rc = 0;
-       struct stack *pool = &em_pool[parms->dir];
+       struct tf_session *tfs;
+#if (TF_EM_ALLOC == 1)
+       struct dpool *pool;
+#else
+       struct stack *pool;
+#endif
+       /* Retrieve the session information */
+       rc = tf_session_get_session(tfp, &tfs);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Failed to lookup session, rc:%s\n",
+                           tf_dir_2_str(parms->dir),
+                           strerror(-rc));
+               return rc;
+       }
 
        rc = tf_msg_delete_em_entry(tfp, parms);
 
        /* Return resource to pool */
-       if (rc == 0)
+       if (rc == 0) {
+#if (TF_EM_ALLOC == 1)
+               pool = (struct dpool *)tfs->em_pool[parms->dir];
+               dpool_free(pool, parms->index);
+#else
+               pool = (struct stack *)tfs->em_pool[parms->dir];
                stack_push(pool, parms->index);
+#endif
+       }
+
+       return rc;
+}
+
+#if (TF_EM_ALLOC == 1)
+/** Move EM internal entry API
+ *
+ * returns:
+ * 0
+ * -EINVAL
+ */
+int
+tf_em_move_int_entry(struct tf *tfp,
+                    struct tf_move_em_entry_parms *parms)
+{
+       int rc = 0;
+       struct dpool *pool;
+       struct tf_session *tfs;
+
+       /* Retrieve the session information */
+       rc = tf_session_get_session(tfp, &tfs);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Failed to lookup session, rc:%s\n",
+                           tf_dir_2_str(parms->dir),
+                           strerror(-rc));
+               return rc;
+       }
+
+       rc = tf_msg_move_em_entry(tfp, parms);
+
+       /* Return resource to pool */
+       if (rc == 0) {
+               pool = (struct dpool *)tfs->em_pool[parms->dir];
+               dpool_free(pool, parms->index);
+       }
 
        return rc;
 }
+#endif
index e373a9b..eec15b8 100644 (file)
@@ -15,7 +15,6 @@
 #include "tf_msg.h"
 #include "tfp.h"
 #include "tf_ext_flow_handle.h"
-
 #include "bnxt.h"
 
 #define TF_EM_DB_EM_REC 0
@@ -23,7 +22,9 @@
 /**
  * EM Pool
  */
-struct stack em_pool[TF_DIR_MAX];
+#if (TF_EM_ALLOC == 1)
+#include "dpool.h"
+#else
 
 /**
  * Create EM Tbl pool of memory indexes.
@@ -41,14 +42,35 @@ struct stack em_pool[TF_DIR_MAX];
  *          - Failure, entry not allocated, out of resources
  */
 static int
-tf_create_em_pool(enum tf_dir dir,
+tf_create_em_pool(struct tf_session *tfs,
+                 enum tf_dir dir,
                  uint32_t num_entries,
                  uint32_t start)
 {
        struct tfp_calloc_parms parms;
        uint32_t i, j;
        int rc = 0;
-       struct stack *pool = &em_pool[dir];
+       struct stack *pool;
+
+       /*
+        * Allocate stack pool
+        */
+       parms.nitems = 1;
+       parms.size = sizeof(struct stack);
+       parms.alignment = 0;
+
+       rc = tfp_calloc(&parms);
+
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s, EM stack allocation failure %s\n",
+                           tf_dir_2_str(dir),
+                           strerror(-rc));
+               return rc;
+       }
+
+       pool = (struct stack *)parms.mem_va;
+       tfs->em_pool[dir] = (void *)pool;
 
        /* Assumes that num_entries has been checked before we get here */
        parms.nitems = num_entries / TF_SESSION_EM_ENTRY_SIZE;
@@ -108,6 +130,8 @@ tf_create_em_pool(enum tf_dir dir,
        return 0;
 cleanup:
        tfp_free((void *)parms.mem_va);
+       tfp_free((void *)tfs->em_pool[dir]);
+       tfs->em_pool[dir] = NULL;
        return rc;
 }
 
@@ -120,16 +144,23 @@ cleanup:
  * Return:
  */
 static void
-tf_free_em_pool(enum tf_dir dir)
+tf_free_em_pool(struct tf_session *tfs,
+               enum tf_dir dir)
 {
-       struct stack *pool = &em_pool[dir];
+       struct stack *pool = (struct stack *)tfs->em_pool[dir];
        uint32_t *ptr;
 
-       ptr = stack_items(pool);
+       if (pool != NULL) {
+               ptr = stack_items(pool);
+
+               if (ptr != NULL)
+                       tfp_free(ptr);
 
-       if (ptr != NULL)
-               tfp_free(ptr);
+               tfp_free(pool);
+               tfs->em_pool[dir] = NULL;
+       }
 }
+#endif /* TF_EM_ALLOC != 1 */
 
 /**
  * Insert EM internal entry API
@@ -146,17 +177,44 @@ tf_em_insert_int_entry(struct tf *tfp,
        uint16_t rptr_index = 0;
        uint8_t rptr_entry = 0;
        uint8_t num_of_entries = 0;
-       struct stack *pool = &em_pool[parms->dir];
+       struct tf_session *tfs;
+#if (TF_EM_ALLOC == 1)
+       struct dpool *pool;
+#else
+       struct stack *pool;
+#endif
        uint32_t index;
 
-       rc = stack_pop(pool, &index);
+       /* Retrieve the session information */
+       rc = tf_session_get_session(tfp, &tfs);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Failed to lookup session, rc:%s\n",
+                           tf_dir_2_str(parms->dir),
+                           strerror(-rc));
+               return rc;
+       }
 
+#if (TF_EM_ALLOC == 1)
+       pool = (struct dpool *)tfs->em_pool[parms->dir];
+       index = dpool_alloc(pool, TF_SESSION_EM_ENTRY_SIZE, 0);
+       if (index == DP_INVALID_INDEX) {
+               PMD_DRV_LOG(ERR,
+                           "%s, EM entry index allocation failed\n",
+                           tf_dir_2_str(parms->dir));
+               return -1;
+       }
+#else
+       pool = (struct stack *)tfs->em_pool[parms->dir];
+       rc = stack_pop(pool, &index);
        if (rc) {
                PMD_DRV_LOG(ERR,
                            "%s, EM entry index allocation failed\n",
                            tf_dir_2_str(parms->dir));
                return rc;
        }
+#endif
+
 
        rptr_index = index;
        rc = tf_msg_insert_em_internal_entry(tfp,
@@ -166,19 +224,13 @@ tf_em_insert_int_entry(struct tf *tfp,
                                             &num_of_entries);
        if (rc) {
                /* Free the allocated index before returning */
+#if (TF_EM_ALLOC == 1)
+               dpool_free(pool, index);
+#else
                stack_push(pool, index);
+#endif
                return -1;
        }
-
-       PMD_DRV_LOG
-                 (DEBUG,
-                  "%s, Internal entry @ Index:%d rptr_index:0x%x rptr_entry:0x%x num_of_entries:%d\n",
-                  tf_dir_2_str(parms->dir),
-                  index,
-                  rptr_index,
-                  rptr_entry,
-                  num_of_entries);
-
        TF_SET_GFID(gfid,
                    ((rptr_index << TF_EM_INTERNAL_INDEX_SHIFT) |
                     rptr_entry),
@@ -211,16 +263,86 @@ tf_em_delete_int_entry(struct tf *tfp,
                       struct tf_delete_em_entry_parms *parms)
 {
        int rc = 0;
-       struct stack *pool = &em_pool[parms->dir];
+       struct tf_session *tfs;
+#if (TF_EM_ALLOC == 1)
+       struct dpool *pool;
+#else
+       struct stack *pool;
+#endif
+       /* Retrieve the session information */
+       rc = tf_session_get_session(tfp, &tfs);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Failed to lookup session, rc:%s\n",
+                           tf_dir_2_str(parms->dir),
+                           strerror(-rc));
+               return rc;
+       }
 
        rc = tf_msg_delete_em_entry(tfp, parms);
 
        /* Return resource to pool */
-       if (rc == 0)
+       if (rc == 0) {
+#if (TF_EM_ALLOC == 1)
+               pool = (struct dpool *)tfs->em_pool[parms->dir];
+               dpool_free(pool, parms->index);
+#else
+               pool = (struct stack *)tfs->em_pool[parms->dir];
                stack_push(pool, parms->index);
+#endif
+       }
+
+       return rc;
+}
+
+#if (TF_EM_ALLOC == 1)
+static int
+tf_em_move_callback(void *user_data,
+                   uint64_t entry_data,
+                   uint32_t new_index)
+{
+       int rc;
+       struct tf *tfp = (struct tf *)user_data;
+       struct tf_move_em_entry_parms parms;
+       struct tf_dev_info     *dev;
+       struct tf_session      *tfs;
+
+       memset(&parms, 0, sizeof(parms));
+
+       parms.tbl_scope_id = 0;
+       parms.flow_handle  = entry_data;
+       parms.new_index    = new_index;
+       TF_GET_DIR_FROM_FLOW_ID(entry_data, parms.dir);
+       parms.mem          = TF_MEM_INTERNAL;
+
+       /* Retrieve the session information */
+       rc = tf_session_get_session(tfp, &tfs);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Failed to lookup session, rc:%s\n",
+                           tf_dir_2_str(parms.dir),
+                           strerror(-rc));
+               return rc;
+       }
+
+       /* Retrieve the device information */
+       rc = tf_session_get_device(tfs, &dev);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Failed to lookup device, rc:%s\n",
+                           tf_dir_2_str(parms.dir),
+                           strerror(-rc));
+               return rc;
+       }
+
+       if (dev->ops->tf_dev_move_int_em_entry != NULL)
+               rc = dev->ops->tf_dev_move_int_em_entry(tfp, &parms);
+       else
+               rc = -EOPNOTSUPP;
 
        return rc;
 }
+#endif
 
 int
 tf_em_int_bind(struct tf *tfp,
@@ -311,14 +433,49 @@ tf_em_int_bind(struct tf *tfp,
                                            tf_dir_2_str(i));
                                return rc;
                        }
+#if (TF_EM_ALLOC == 1)
+                       /*
+                        * Allocate dpool
+                        */
+                       cparms.nitems = 1;
+                       cparms.size = sizeof(struct dpool);
+                       cparms.alignment = 0;
+
+                       rc = tfp_calloc(&cparms);
 
-                       rc = tf_create_em_pool(i,
-                                              iparms.info->entry.stride,
-                                              iparms.info->entry.start);
+                       if (rc) {
+                               TFP_DRV_LOG(ERR,
+                                        "%s, EM dpool allocation failure %s\n",
+                                        tf_dir_2_str(i),
+                                        strerror(-rc));
+                               return rc;
+                       }
+
+                       tfs->em_pool[i] = (struct dpool *)cparms.mem_va;
+
+                       rc = dpool_init(tfs->em_pool[i],
+                                       iparms.info->entry.start,
+                                       iparms.info->entry.stride,
+                                       7,
+                                       (void *)tfp,
+                                       tf_em_move_callback);
+#else
+                       rc = tf_create_em_pool(tfs,
+                                      i,
+                                      iparms.info->entry.stride,
+                                      iparms.info->entry.start);
+#endif
                        /* Logging handled in tf_create_em_pool */
                        if (rc)
                                return rc;
                }
+
+               if (rc) {
+                       TFP_DRV_LOG(ERR,
+                                   "%s: EM pool init failed\n",
+                                   tf_dir_2_str(i));
+                       return rc;
+               }
        }
 
        return 0;
@@ -343,7 +500,11 @@ tf_em_int_unbind(struct tf *tfp)
 
        if (!tf_session_is_shared_session(tfs)) {
                for (i = 0; i < TF_DIR_MAX; i++)
-                       tf_free_em_pool(i);
+#if (TF_EM_ALLOC == 1)
+                       dpool_free_all(tfs->em_pool[i]);
+#else
+                       tf_free_em_pool(tfs, i);
+#endif
        }
 
        rc = tf_session_get_db(tfp, TF_MODULE_TYPE_EM, &em_db_ptr);
index 4a840f3..2ee8a1e 100644 (file)
@@ -1004,6 +1004,75 @@ tf_msg_delete_em_entry(struct tf *tfp,
        return 0;
 }
 
+int
+tf_msg_move_em_entry(struct tf *tfp,
+                    struct tf_move_em_entry_parms *em_parms)
+{
+       int rc;
+       struct tfp_send_msg_parms parms = { 0 };
+       struct hwrm_tf_em_move_input req = { 0 };
+       struct hwrm_tf_em_move_output resp = { 0 };
+       uint16_t flags;
+       uint8_t fw_session_id;
+       struct tf_dev_info *dev;
+       struct tf_session *tfs;
+
+       /* Retrieve the session information */
+       rc = tf_session_get_session_internal(tfp, &tfs);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Failed to lookup session, rc:%s\n",
+                           tf_dir_2_str(em_parms->dir),
+                           strerror(-rc));
+               return rc;
+       }
+
+       /* Retrieve the device information */
+       rc = tf_session_get_device(tfs, &dev);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Failed to lookup device, rc:%s\n",
+                           tf_dir_2_str(em_parms->dir),
+                           strerror(-rc));
+               return rc;
+       }
+
+       rc = tf_session_get_fw_session_id(tfp, &fw_session_id);
+       if (rc) {
+               TFP_DRV_LOG(ERR,
+                           "%s: Unable to lookup FW id, rc:%s\n",
+                           tf_dir_2_str(em_parms->dir),
+                           strerror(-rc));
+               return rc;
+       }
+
+       /* Populate the request */
+       req.fw_session_id = tfp_cpu_to_le_32(fw_session_id);
+
+       flags = (em_parms->dir == TF_DIR_TX ?
+                HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_TX :
+                HWRM_TF_EM_DELETE_INPUT_FLAGS_DIR_RX);
+       req.flags = tfp_cpu_to_le_16(flags);
+       req.flow_handle = tfp_cpu_to_le_64(em_parms->flow_handle);
+       req.new_index = tfp_cpu_to_le_32(em_parms->new_index);
+
+       parms.tf_type = HWRM_TF_EM_MOVE;
+       parms.req_data = (uint32_t *)&req;
+       parms.req_size = sizeof(req);
+       parms.resp_data = (uint32_t *)&resp;
+       parms.resp_size = sizeof(resp);
+       parms.mailbox = dev->ops->tf_dev_get_mailbox();
+
+       rc = tfp_send_msg_direct(tf_session_get_bp(tfs),
+                                &parms);
+       if (rc)
+               return rc;
+
+       em_parms->index = tfp_le_to_cpu_16(resp.em_index);
+
+       return 0;
+}
+
 int tf_msg_ext_em_ctxt_mem_alloc(struct tf *tfp,
                                struct hcapi_cfa_em_table *tbl,
                                uint64_t *dma_addr,
index 5ecaf9e..e8662fe 100644 (file)
@@ -315,6 +315,21 @@ tf_msg_hash_insert_em_internal_entry(struct tf *tfp,
 int tf_msg_delete_em_entry(struct tf *tfp,
                           struct tf_delete_em_entry_parms *em_parms);
 
+/**
+ * Sends EM internal move request to Firmware
+ *
+ * [in] tfp
+ *   Pointer to TF handle
+ *
+ * [in] em_parms
+ *   Pointer to em move parameters
+ *
+ * Returns:
+ *   0 on Success else internal Truflow error
+ */
+int tf_msg_move_em_entry(struct tf *tfp,
+                        struct tf_move_em_entry_parms *em_parms);
+
 /**
  * Sends Ext EM mem allocation request to Firmware
  *
index 0b8f63c..e2cebd2 100644 (file)
@@ -159,6 +159,11 @@ struct tf_session {
         * the pointer to the parent bp struct
         */
        void *bp;
+
+       /**
+        * EM allocator for session
+        */
+       void *em_pool[TF_DIR_MAX];
 };
 
 /**