From 5ecdeba601d13156379dde4e8bbca69a20c049af Mon Sep 17 00:00:00 2001
From: Bruce Richardson
Date: Tue, 24 Nov 2015 14:25:56 +0000
Subject: [PATCH] lpm: merge tbl24 and tbl8 structures

The tbl8 and tbl24 structures were essentially identical except for
slightly different names for one or two fields. Merge these two
structures into a single structure definition.

Two fields have been renamed as part of this change: the "ext_entry"
field in the tbl24 has been renamed to "valid_group" to match the tbl8
field and make the merge easier, and the "tbl8_gindex" field has been
renamed to "group_idx".

The "valid_group" field now serves two purposes: in a tbl8 it indicates
whether the group, i.e. the tbl8, is valid, and in a tbl24 it indicates
whether "group_idx" is valid, i.e. whether the stored value is a
next_hop or a tbl8 index. [The name "group_idx" was used to make this
latter link between the fields clearer.]

Suggested-by: Vladimir Medvedkin
Signed-off-by: Bruce Richardson
---
 lib/librte_lpm/rte_lpm.c | 77 ++++++++++++++++++++--------------------
 lib/librte_lpm/rte_lpm.h | 49 ++++++++++++-------------
 2 files changed, 61 insertions(+), 65 deletions(-)
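To make the dual role of "valid_group" concrete, here is a minimal
standalone sketch of how a lookup can interpret the merged entry type
(placed here, between the "---" marker and the diff, where tools such
as "git am" ignore it). The struct layout is copied from the
little-endian branch of this patch; lookup_sketch(), GROUP_NUM_ENTRIES
and the toy main() are hypothetical stand-ins, not DPDK API:

#include <stdint.h>
#include <stdio.h>

#define GROUP_NUM_ENTRIES 256 /* stand-in for RTE_LPM_TBL8_GROUP_NUM_ENTRIES */

struct rte_lpm_tbl_entry {
	union {
		uint8_t next_hop;  /* route payload */
		uint8_t group_idx; /* tbl8 group number (tbl24, valid_group == 1) */
	};
	uint8_t valid       :1; /* entry holds a route */
	uint8_t valid_group :1; /* tbl24: group_idx is valid; tbl8: group in use */
	uint8_t depth       :6; /* rule depth */
};

/* Mirrors the RTE_BUILD_BUG_ON() in rte_lpm_create(). */
_Static_assert(sizeof(struct rte_lpm_tbl_entry) == 2, "entry must be 2 bytes");

/* Hypothetical helper: resolve an IPv4 address against toy tables. */
static int
lookup_sketch(const struct rte_lpm_tbl_entry *tbl24,
		const struct rte_lpm_tbl_entry *tbl8,
		uint32_t ip, uint8_t *next_hop)
{
	struct rte_lpm_tbl_entry e = tbl24[ip >> 8]; /* top 24 bits */

	if (e.valid && e.valid_group) {
		/* Extended entry: group_idx selects a 256-entry tbl8
		 * group and the low 8 address bits index within it. */
		e = tbl8[e.group_idx * GROUP_NUM_ENTRIES + (ip & 0xFF)];
	}
	if (!e.valid)
		return -1; /* no rule covers this address */
	*next_hop = e.next_hop;
	return 0;
}

int
main(void)
{
	/* Zeroed BSS; ~32 MB for tbl24, sized as in the real library. */
	static struct rte_lpm_tbl_entry tbl24[1 << 24];
	static struct rte_lpm_tbl_entry tbl8[256 * GROUP_NUM_ENTRIES];
	uint8_t nh;

	/* Route 10.0.0.0/24 -> next hop 7, held directly in tbl24. */
	tbl24[10u << 16] = (struct rte_lpm_tbl_entry){
		{ .next_hop = 7, }, .valid = 1, .valid_group = 0, .depth = 24, };

	if (lookup_sketch(tbl24, tbl8, (10u << 24) | 99, &nh) == 0)
		printf("next hop %u\n", nh); /* prints "next hop 7" */
	return 0;
}

Because "valid" and "valid_group" occupy fixed bit positions in the
2-byte entry, a lookup fast path can also load an entry as a uint16_t
and test both flags in one mask operation against
RTE_LPM_VALID_EXT_ENTRY_BITMASK (0x0300, defined in rte_lpm.h below);
a second sketch after the patch trailer demonstrates this.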
diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index cfdf9598b3..22f2ae9a2e 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -159,8 +159,7 @@ rte_lpm_create(const char *name, int socket_id, int max_rules,
 	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
 
-	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl24_entry) != 2);
-	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl8_entry) != 2);
+	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 2);
 
 	/* Check user arguments. */
 	if ((name == NULL) || (socket_id < -1) || (max_rules == 0)){
@@ -382,15 +381,15 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
  * Find, clean and allocate a tbl8.
  */
 static inline int32_t
-tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
+tbl8_alloc(struct rte_lpm_tbl_entry *tbl8)
 {
-	uint32_t tbl8_gindex; /* tbl8 group index. */
-	struct rte_lpm_tbl8_entry *tbl8_entry;
+	uint32_t group_idx; /* tbl8 group index. */
+	struct rte_lpm_tbl_entry *tbl8_entry;
 
 	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
-	for (tbl8_gindex = 0; tbl8_gindex < RTE_LPM_TBL8_NUM_GROUPS;
-			tbl8_gindex++) {
-		tbl8_entry = &tbl8[tbl8_gindex *
+	for (group_idx = 0; group_idx < RTE_LPM_TBL8_NUM_GROUPS;
+			group_idx++) {
+		tbl8_entry = &tbl8[group_idx *
 				RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
 		/* If a free tbl8 group is found clean it and set as VALID. */
 		if (!tbl8_entry->valid_group) {
@@ -401,7 +400,7 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
 			tbl8_entry->valid_group = VALID;
 
 			/* Return group index for allocated tbl8 group. */
-			return tbl8_gindex;
+			return group_idx;
 		}
 	}
 
@@ -410,7 +409,7 @@ tbl8_alloc(struct rte_lpm_tbl8_entry *tbl8)
 }
 
 static inline void
-tbl8_free(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
 {
 	/* Set tbl8 group invalid*/
 	tbl8[tbl8_group_start].valid_group = INVALID;
@@ -431,13 +430,13 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 		 * For invalid OR valid and non-extended tbl 24 entries set
 		 * entry.
 		 */
-		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].ext_entry == 0 &&
+		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
 				lpm->tbl24[i].depth <= depth)) {
 
-			struct rte_lpm_tbl24_entry new_tbl24_entry = {
+			struct rte_lpm_tbl_entry new_tbl24_entry = {
 				{ .next_hop = next_hop, },
 				.valid = VALID,
-				.ext_entry = 0,
+				.valid_group = 0,
 				.depth = depth,
 			};
@@ -449,11 +448,11 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 			continue;
 		}
 
-		if (lpm->tbl24[i].ext_entry == 1) {
+		if (lpm->tbl24[i].valid_group == 1) {
 			/* If tbl24 entry is valid and extended calculate the
 			 *  index into tbl8.
 			 */
-			tbl8_index = lpm->tbl24[i].tbl8_gindex *
+			tbl8_index = lpm->tbl24[i].group_idx *
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 			tbl8_group_end = tbl8_index +
 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
@@ -461,7 +460,7 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
 			for (j = tbl8_index; j < tbl8_group_end; j++) {
 				if (!lpm->tbl8[j].valid ||
 						lpm->tbl8[j].depth <= depth) {
-					struct rte_lpm_tbl8_entry
+					struct rte_lpm_tbl_entry
 						new_tbl8_entry = {
 						.valid = VALID,
 						.valid_group = VALID,
@@ -522,17 +521,17 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 		 * so assign whole structure in one go
 		 */
 
-		struct rte_lpm_tbl24_entry new_tbl24_entry = {
-			{ .tbl8_gindex = (uint8_t)tbl8_group_index, },
+		struct rte_lpm_tbl_entry new_tbl24_entry = {
+			{ .group_idx = (uint8_t)tbl8_group_index, },
 			.valid = VALID,
-			.ext_entry = 1,
+			.valid_group = 1,
 			.depth = 0,
 		};
 
 		lpm->tbl24[tbl24_index] = new_tbl24_entry;
 
 	}/* If valid entry but not extended calculate the index into Table8. */
-	else if (lpm->tbl24[tbl24_index].ext_entry == 0) {
+	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
 		/* Search for free tbl8 group. */
 		tbl8_group_index = tbl8_alloc(lpm->tbl8);
@@ -573,10 +572,10 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 		 * so assign whole structure in one go.
 		 */
 
-		struct rte_lpm_tbl24_entry new_tbl24_entry = {
-			{ .tbl8_gindex = (uint8_t)tbl8_group_index, },
+		struct rte_lpm_tbl_entry new_tbl24_entry = {
+			{ .group_idx = (uint8_t)tbl8_group_index, },
 			.valid = VALID,
-			.ext_entry = 1,
+			.valid_group = 1,
 			.depth = 0,
 		};
@@ -586,7 +585,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 	else { /*
 		* If it is valid, extended entry calculate the index into tbl8.
 		*/
-		tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
 		tbl8_group_start = tbl8_group_index *
 				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
@@ -595,7 +594,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 		if (!lpm->tbl8[i].valid ||
 				lpm->tbl8[i].depth <= depth) {
-			struct rte_lpm_tbl8_entry new_tbl8_entry = {
+			struct rte_lpm_tbl_entry new_tbl8_entry = {
 				.valid = VALID,
 				.depth = depth,
 				.next_hop = next_hop,
@@ -731,17 +730,17 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 		 */
 		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
 
-			if (lpm->tbl24[i].ext_entry == 0 &&
+			if (lpm->tbl24[i].valid_group == 0 &&
 					lpm->tbl24[i].depth <= depth ) {
 				lpm->tbl24[i].valid = INVALID;
-			} else if (lpm->tbl24[i].ext_entry == 1) {
+			} else if (lpm->tbl24[i].valid_group == 1) {
 				/*
 				 * If TBL24 entry is extended, then there has
 				 * to be a rule with depth >= 25 in the
 				 * associated TBL8 group.
 				 */
-				tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+				tbl8_group_index = lpm->tbl24[i].group_idx;
 				tbl8_index = tbl8_group_index *
 						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
@@ -760,14 +759,14 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 		 * associated with this rule.
 		 */
 
-		struct rte_lpm_tbl24_entry new_tbl24_entry = {
+		struct rte_lpm_tbl_entry new_tbl24_entry = {
 			{.next_hop = lpm->rules_tbl[sub_rule_index].next_hop,},
 			.valid = VALID,
-			.ext_entry = 0,
+			.valid_group = 0,
 			.depth = sub_rule_depth,
 		};
 
-		struct rte_lpm_tbl8_entry new_tbl8_entry = {
+		struct rte_lpm_tbl_entry new_tbl8_entry = {
 			.valid = VALID,
 			.valid_group = VALID,
 			.depth = sub_rule_depth,
@@ -777,17 +776,17 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
 
 		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
 
-			if (lpm->tbl24[i].ext_entry == 0 &&
+			if (lpm->tbl24[i].valid_group == 0 &&
 					lpm->tbl24[i].depth <= depth ) {
 				lpm->tbl24[i] = new_tbl24_entry;
-			} else if (lpm->tbl24[i].ext_entry == 1) {
+			} else if (lpm->tbl24[i].valid_group == 1) {
 				/*
 				 * If TBL24 entry is extended, then there has
 				 * to be a rule with depth >= 25 in the
 				 * associated TBL8 group.
 				 */
-				tbl8_group_index = lpm->tbl24[i].tbl8_gindex;
+				tbl8_group_index = lpm->tbl24[i].group_idx;
 				tbl8_index = tbl8_group_index *
 						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
@@ -813,7 +812,7 @@ delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
  * thus can be recycled
  */
 static inline int32_t
-tbl8_recycle_check(struct rte_lpm_tbl8_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
 {
 	uint32_t tbl8_group_end, i;
 	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
@@ -873,7 +872,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 	tbl24_index = ip_masked >> 8;
 
 	/* Calculate the index into tbl8 and range. */
-	tbl8_group_index = lpm->tbl24[tbl24_index].tbl8_gindex;
+	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
 	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
 	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
 	tbl8_range = depth_to_range(depth);
@@ -890,7 +889,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 	}
 	else {
 		/* Set new tbl8 entry. */
-		struct rte_lpm_tbl8_entry new_tbl8_entry = {
+		struct rte_lpm_tbl_entry new_tbl8_entry = {
 			.valid = VALID,
 			.depth = sub_rule_depth,
 			.valid_group = lpm->tbl8[tbl8_group_start].valid_group,
@@ -922,10 +921,10 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 	}
 	else if (tbl8_recycle_index > -1) {
 		/* Update tbl24 entry. */
-		struct rte_lpm_tbl24_entry new_tbl24_entry = {
+		struct rte_lpm_tbl_entry new_tbl24_entry = {
 			{ .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop, },
 			.valid = VALID,
-			.ext_entry = 0,
+			.valid_group = 0,
 			.depth = lpm->tbl8[tbl8_recycle_index].depth,
 		};
 
diff --git a/lib/librte_lpm/rte_lpm.h b/lib/librte_lpm/rte_lpm.h
index c299ce2fb4..8b4ec170b1 100644
--- a/lib/librte_lpm/rte_lpm.h
+++ b/lib/librte_lpm/rte_lpm.h
@@ -81,7 +81,7 @@ extern "C" {
 #define RTE_LPM_RETURN_IF_TRUE(cond, retval)
 #endif
 
-/** @internal bitmask with valid and ext_entry/valid_group fields set */
+/** @internal bitmask with valid and valid_group fields set */
 #define RTE_LPM_VALID_EXT_ENTRY_BITMASK 0x0300
 
 /** Bitmask used to indicate successful lookup */
@@ -89,43 +89,40 @@ extern "C" {
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
 /** @internal Tbl24 entry structure. */
-struct rte_lpm_tbl24_entry {
-	/* Stores Next hop or group index (i.e. gindex)into tbl8. */
+struct rte_lpm_tbl_entry {
+	/**
+	 * Stores Next hop (tbl8 or tbl24 when valid_group is not set) or
+	 * a group index pointing to a tbl8 structure (tbl24 only, when
+	 * valid_group is set)
+	 */
 	union {
 		uint8_t next_hop;
-		uint8_t tbl8_gindex;
+		uint8_t group_idx;
 	};
 	/* Using single uint8_t to store 3 values. */
-	uint8_t valid     :1; /**< Validation flag. */
-	uint8_t ext_entry :1; /**< External entry. */
-	uint8_t depth     :6; /**< Rule depth. */
-};
-
-/** @internal Tbl8 entry structure. */
-struct rte_lpm_tbl8_entry {
-	uint8_t next_hop; /**< next hop. */
-	/* Using single uint8_t to store 3 values. */
-	uint8_t valid       :1; /**< Validation flag. */
-	uint8_t valid_group :1; /**< Group validation flag. */
+	uint8_t valid       :1; /**< Validation flag. */
+	/**
+	 * For tbl24:
+	 *  - valid_group == 0: entry stores a next hop
+	 *  - valid_group == 1: entry stores a group_index pointing to a tbl8
+	 * For tbl8:
+	 *  - valid_group indicates whether the current tbl8 is in use or not
+	 */
+	uint8_t valid_group :1;
 	uint8_t depth       :6; /**< Rule depth. */
 };
+
 #else
-struct rte_lpm_tbl24_entry {
+struct rte_lpm_tbl_entry {
 	uint8_t depth       :6;
-	uint8_t ext_entry   :1;
+	uint8_t valid_group :1;
 	uint8_t valid       :1;
 	union {
-		uint8_t tbl8_gindex;
+		uint8_t group_idx;
 		uint8_t next_hop;
 	};
 };
-struct rte_lpm_tbl8_entry {
-	uint8_t depth       :6;
-	uint8_t valid_group :1;
-	uint8_t valid       :1;
-	uint8_t next_hop;
-};
 #endif
 
 /** @internal Rule structure. */
@@ -148,9 +145,9 @@ struct rte_lpm {
 	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH]; /**< Rule info table. */
 
 	/* LPM Tables. */
-	struct rte_lpm_tbl24_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES] \
+	struct rte_lpm_tbl_entry tbl24[RTE_LPM_TBL24_NUM_ENTRIES]
 			__rte_cache_aligned; /**< LPM tbl24 table. */
-	struct rte_lpm_tbl8_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES] \
+	struct rte_lpm_tbl_entry tbl8[RTE_LPM_TBL8_NUM_ENTRIES]
 			__rte_cache_aligned; /**< LPM tbl8 table. */
 	struct rte_lpm_rule rules_tbl[0] \
 			__rte_cache_aligned; /**< LPM rules. */
-- 
2.20.1
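Following up the note above: assuming the little-endian struct variant
from this patch and GCC-style bit-field allocation (bit-field layout is
implementation-defined, so this is an assumption), "valid" and
"valid_group" land in bits 8 and 9 of the entry when it is read as a
uint16_t, which is exactly the pair that RTE_LPM_VALID_EXT_ENTRY_BITMASK
(0x0300) selects. A self-contained check of that claim:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct rte_lpm_tbl_entry { /* little-endian variant from the patch */
	union {
		uint8_t next_hop;
		uint8_t group_idx;
	};
	uint8_t valid       :1;
	uint8_t valid_group :1;
	uint8_t depth       :6;
};

int
main(void)
{
	/* An extended tbl24 entry: both flags set, group_idx = 3. */
	struct rte_lpm_tbl_entry e = { { .group_idx = 3, },
			.valid = 1, .valid_group = 1, .depth = 0, };
	uint16_t raw;

	memcpy(&raw, &e, sizeof(raw)); /* raw == 0x0303 on this layout */
	/* The masked value equals the mask only when both flags are set,
	 * so one comparison distinguishes next_hop from group_idx use. */
	assert((raw & 0x0300) == 0x0300);
	return 0;
}

This single-compare test is what lets lookup code decide whether to
follow group_idx into a tbl8 group without unpacking the individual
bit fields.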