-add_depth_big_v20(struct rte_lpm_v20 *lpm, uint32_t ip_masked, uint8_t depth,
- uint8_t next_hop)
-{
- uint32_t tbl24_index;
- int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
- tbl8_range, i;
-
- tbl24_index = (ip_masked >> 8);
- tbl8_range = depth_to_range(depth);
-
- if (!lpm->tbl24[tbl24_index].valid) {
- /* Search for a free tbl8 group. */
- tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
-
- /* Check tbl8 allocation was successful. */
- if (tbl8_group_index < 0) {
- return tbl8_group_index;
- }
-
- /* Find index into tbl8 and range. */
- tbl8_index = (tbl8_group_index *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
- (ip_masked & 0xFF);
-
- /* Set tbl8 entry. */
- for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
- struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
- .valid = VALID,
- .depth = depth,
- .valid_group = lpm->tbl8[i].valid_group,
- };
- new_tbl8_entry.next_hop = next_hop;
- __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
- __ATOMIC_RELAXED);
- }
-
- /*
- * Update tbl24 entry to point to new tbl8 entry. Note: The
- * ext_flag and tbl8_index need to be updated simultaneously,
- * so assign whole structure in one go
- */
-
- struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
- .group_idx = (uint8_t)tbl8_group_index,
- .valid = VALID,
- .valid_group = 1,
- .depth = 0,
- };
-
- __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
- __ATOMIC_RELEASE);
-
- } /* If the entry is valid but not extended, calculate the index into tbl8. */
- else if (lpm->tbl24[tbl24_index].valid_group == 0) {
- /* Search for free tbl8 group. */
- tbl8_group_index = tbl8_alloc_v20(lpm->tbl8);
-
- if (tbl8_group_index < 0) {
- return tbl8_group_index;
- }
-
- tbl8_group_start = tbl8_group_index *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl8_group_end = tbl8_group_start +
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
- /* Populate new tbl8 with tbl24 value. */
- for (i = tbl8_group_start; i < tbl8_group_end; i++) {
- struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
- .valid = VALID,
- .depth = lpm->tbl24[tbl24_index].depth,
- .valid_group = lpm->tbl8[i].valid_group,
- };
- new_tbl8_entry.next_hop =
- lpm->tbl24[tbl24_index].next_hop;
- __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
- __ATOMIC_RELAXED);
- }
-
- tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
-
- /* Insert new rule into the tbl8 entry. */
- for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
- struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
- .valid = VALID,
- .depth = depth,
- .valid_group = lpm->tbl8[i].valid_group,
- };
- new_tbl8_entry.next_hop = next_hop;
- __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
- __ATOMIC_RELAXED);
- }
-
- /*
- * Update tbl24 entry to point to new tbl8 entry. Note: The
- * ext_flag and tbl8_index need to be updated simultaneously,
- * so assign whole structure in one go.
- */
-
- struct rte_lpm_tbl_entry_v20 new_tbl24_entry = {
- .group_idx = (uint8_t)tbl8_group_index,
- .valid = VALID,
- .valid_group = 1,
- .depth = 0,
- };
-
- __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
- __ATOMIC_RELEASE);
-
- } else { /*
- * If it is a valid, extended entry, calculate the index into tbl8.
- */
- tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
- tbl8_group_start = tbl8_group_index *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
- tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
-
- for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
-
- if (!lpm->tbl8[i].valid ||
- lpm->tbl8[i].depth <= depth) {
- struct rte_lpm_tbl_entry_v20 new_tbl8_entry = {
- .valid = VALID,
- .depth = depth,
- .valid_group = lpm->tbl8[i].valid_group,
- };
- new_tbl8_entry.next_hop = next_hop;
- /*
- * Setting tbl8 entry in one go to avoid race
- * condition
- */
- __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
- __ATOMIC_RELAXED);
-
- continue;
- }
- }
- }
-
- return 0;
-}
-
-static __rte_noinline int32_t
-add_depth_big_v1604(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,