lpm: fix depth small entry add
author: Zhe Tao <zhe.tao@intel.com>
Thu, 30 Jul 2015 03:19:02 +0000 (11:19 +0800)
committer: Thomas Monjalon <thomas.monjalon@6wind.com>
Mon, 3 Aug 2015 11:50:00 +0000 (13:50 +0200)
When adding a "depth small" entry, if the tbl24 entry's extended flag is
not set and the new depth is smaller than the depth already stored in
that entry, nothing should be done; otherwise the code operates on the
wrong memory area (the sketch after the diff illustrates why).

Signed-off-by: Zhe Tao <zhe.tao@intel.com>
Acked-by: Cunming Liang <cunming.liang@intel.com>
lib/librte_lpm/rte_lpm.c

index de05307..163ba3c 100644
@@ -442,35 +442,41 @@ add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
                        };
 
                        /* Setting tbl24 entry in one go to avoid race
-                        * conditions */
+                        * conditions
+                        */
                        lpm->tbl24[i] = new_tbl24_entry;
 
                        continue;
                }
 
-               /* If tbl24 entry is valid and extended calculate the index
-                * into tbl8. */
-               tbl8_index = lpm->tbl24[i].tbl8_gindex *
-                               RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-               tbl8_group_end = tbl8_index + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
-
-               for (j = tbl8_index; j < tbl8_group_end; j++) {
-                       if (!lpm->tbl8[j].valid ||
-                                       lpm->tbl8[j].depth <= depth) {
-                               struct rte_lpm_tbl8_entry new_tbl8_entry = {
-                                       .valid = VALID,
-                                       .valid_group = VALID,
-                                       .depth = depth,
-                                       .next_hop = next_hop,
-                               };
-
-                               /*
-                                * Setting tbl8 entry in one go to avoid race
-                                * conditions
-                                */
-                               lpm->tbl8[j] = new_tbl8_entry;
-
-                               continue;
+               if (lpm->tbl24[i].ext_entry == 1) {
+                       /* If tbl24 entry is valid and extended, calculate
+                        * the index into tbl8.
+                        */
+                       tbl8_index = lpm->tbl24[i].tbl8_gindex *
+                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+                       tbl8_group_end = tbl8_index +
+                                       RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
+
+                       for (j = tbl8_index; j < tbl8_group_end; j++) {
+                               if (!lpm->tbl8[j].valid ||
+                                               lpm->tbl8[j].depth <= depth) {
+                                       struct rte_lpm_tbl8_entry
+                                               new_tbl8_entry = {
+                                               .valid = VALID,
+                                               .valid_group = VALID,
+                                               .depth = depth,
+                                               .next_hop = next_hop,
+                                       };
+
+                                       /*
+                                        * Setting tbl8 entry in one go to avoid
+                                        * race conditions
+                                        */
+                                       lpm->tbl8[j] = new_tbl8_entry;
+
+                                       continue;
+                               }
                        }
                }
        }
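
For context, here is a minimal sketch of the failure mode the new
ext_entry check prevents. It assumes the rte_lpm layout of this release,
in which next_hop and tbl8_gindex overlap in a union inside
struct rte_lpm_tbl24_entry; the struct name, field widths and the
TBL8_GROUP_NUM_ENTRIES stand-in below are simplified illustrations, not
the DPDK definitions. On a valid but non-extended entry, the pre-fix
code read tbl8_gindex anyway, so the stored next hop was reinterpreted
as a tbl8 group index:

/*
 * Minimal sketch, not the DPDK source: the layout mirrors the era's
 * struct rte_lpm_tbl24_entry, where next_hop and tbl8_gindex share an
 * anonymous union (C11). Names and sizes are illustrative stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

struct tbl24_entry {
	union {
		uint8_t next_hop;    /* meaningful when ext_entry == 0 */
		uint8_t tbl8_gindex; /* meaningful when ext_entry == 1 */
	};
	uint8_t valid     :1;
	uint8_t ext_entry :1;
	uint8_t depth     :6;
};

#define TBL8_GROUP_NUM_ENTRIES 256 /* stand-in for the real constant */

int main(void)
{
	/* A valid, non-extended entry whose next hop happens to be 42. */
	struct tbl24_entry e = {
		.next_hop = 42, .valid = 1, .ext_entry = 0, .depth = 24
	};

	/*
	 * Pre-fix behaviour: the index below was computed unconditionally,
	 * so for a non-extended entry the next hop (42) is reinterpreted
	 * as a tbl8 group index and later writes land in an unrelated
	 * tbl8 group (the "wrong memory area" of the commit message).
	 */
	uint32_t bogus = (uint32_t)e.tbl8_gindex * TBL8_GROUP_NUM_ENTRIES;
	printf("unguarded: would write tbl8 entries from index %u\n", bogus);

	/* The fix: only touch tbl8 when the entry really is extended. */
	if (e.ext_entry == 1)
		printf("guarded: updating tbl8 group %u\n", e.tbl8_gindex);
	else
		printf("guarded: non-extended entry, tbl8 left alone\n");

	return 0;
}

With the guard in place, a valid but non-extended entry falls through
untouched, which matches the "nothing should be done" behaviour
described in the commit message.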