+/*
+ * Test for recycle of tbl8
+ * - step 1: add a rule with depth=28 (> 24)
+ * - step 2: add a rule with same 24-bit prefix and depth=23 (< 24)
+ * - step 3: delete the first rule
+ * - step 4: check tbl8 is freed
+ * - step 5: add a rule same as the first one (depth=28)
+ * - step 6: check same tbl8 is allocated
+ * - step 7: add a rule with same 24-bit prefix and depth=24
+ * - step 8: delete the rule (depth=28) added in step 5
+ * - step 9: check tbl8 is freed
+ * - step 10: add a rule with same 24-bit prefix and depth = 28
+ * - step 11: check same tbl8 is allocated again
+ */
+int32_t
+test18(void)
+{
+#define group_idx next_hop
+ struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+ uint32_t ip, next_hop;
+ uint8_t depth;
+ uint32_t tbl8_group_index;
+ int32_t status;
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ /* Step 1: depth > 24 forces allocation of a tbl8 group */
+ ip = RTE_IPV4(192, 168, 100, 100);
+ depth = 28;
+ next_hop = 1;
+ status = rte_lpm_add(lpm, ip, depth, next_hop);
+ TEST_LPM_ASSERT(status == 0);
+
+ TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
+ /* Remember which tbl8 group was handed out for this /24 prefix */
+ tbl8_group_index = lpm->tbl24[ip>>8].group_idx;
+
+ /* Step 2: a depth < 24 rule on the same prefix must not touch the tbl8 */
+ depth = 23;
+ next_hop = 2;
+ status = rte_lpm_add(lpm, ip, depth, next_hop);
+ TEST_LPM_ASSERT(status == 0);
+ TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
+
+ /* Steps 3-4: deleting the depth=28 rule must free the tbl8 group */
+ depth = 28;
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);
+
+ /* Steps 5-6: re-adding the rule must recycle the same tbl8 group */
+ next_hop = 3;
+ status = rte_lpm_add(lpm, ip, depth, next_hop);
+ TEST_LPM_ASSERT(status == 0);
+
+ TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
+ TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);
+
+ /* Step 7: add a rule with the same 24-bit prefix and depth = 24 */
+ depth = 24;
+ next_hop = 4;
+ status = rte_lpm_add(lpm, ip, depth, next_hop);
+ TEST_LPM_ASSERT(status == 0);
+ TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
+
+ /* Steps 8-9: deleting the depth=28 rule frees the tbl8 again */
+ depth = 28;
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+
+ TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);
+
+ /* Steps 10-11: the same tbl8 group must be recycled once more */
+ next_hop = 5;
+ status = rte_lpm_add(lpm, ip, depth, next_hop);
+ TEST_LPM_ASSERT(status == 0);
+
+ TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
+ TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);
+
+ rte_lpm_free(lpm);
+#undef group_idx
+ return PASS;
+}
+
+/*
+ * rte_lpm_rcu_qsbr_add positive and negative tests.
+ * - Add RCU QSBR variable to LPM
+ * - Add another RCU QSBR variable to LPM
+ * - Check returns
+ */
+int32_t
+test19(void)
+{
+ struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+ size_t mem_sz;
+ struct rte_rcu_qsbr *v1;
+ struct rte_rcu_qsbr *v2;
+ int32_t ret;
+ struct rte_lpm_rcu_config rcu_cfg = {0};
+
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = NUMBER_TBL8S;
+ config.flags = 0;
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ /* Allocate and initialize an RCU QSBR variable */
+ mem_sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
+ v1 = (struct rte_rcu_qsbr *)rte_zmalloc_socket(NULL, mem_sz,
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ TEST_LPM_ASSERT(v1 != NULL);
+
+ ret = rte_rcu_qsbr_init(v1, RTE_MAX_LCORE);
+ TEST_LPM_ASSERT(ret == 0);
+
+ /* An out-of-range QSBR mode must be rejected */
+ rcu_cfg.v = v1;
+ rcu_cfg.mode = 2;
+ ret = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
+ TEST_LPM_ASSERT(ret != 0);
+
+ /* A valid mode must attach cleanly */
+ rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ;
+ ret = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
+ TEST_LPM_ASSERT(ret == 0);
+
+ /* A second QSBR variable on the same table must be refused */
+ v2 = (struct rte_rcu_qsbr *)rte_zmalloc_socket(NULL, mem_sz,
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ TEST_LPM_ASSERT(v2 != NULL);
+
+ rcu_cfg.v = v2;
+ rcu_cfg.mode = RTE_LPM_QSBR_MODE_SYNC;
+ ret = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
+ TEST_LPM_ASSERT(ret != 0);
+
+ rte_lpm_free(lpm);
+ rte_free(v1);
+ rte_free(v2);
+
+ return PASS;
+}
+
+/*
+ * rte_lpm_rcu_qsbr_add DQ mode functional test.
+ * Reader and writer are in the same thread in this test.
+ * - Create LPM which supports 1 tbl8 group at max
+ * - Add RCU QSBR variable to LPM
+ * - Add a rule with depth=28 (> 24)
+ * - Register a reader thread (not a real thread)
+ * - Reader lookup existing rule
+ * - Writer delete the rule
+ * - Reader lookup the rule
+ * - Writer re-add the rule (no available tbl8 group)
+ * - Reader report quiescent state and unregister
+ * - Writer re-add the rule
+ * - Reader lookup the rule
+ */
+int32_t
+test20(void)
+{
+ struct rte_lpm *lpm = NULL;
+ struct rte_lpm_config config;
+ size_t sz;
+ struct rte_rcu_qsbr *qsv;
+ int32_t status;
+ uint32_t ip, next_hop, next_hop_return;
+ uint8_t depth;
+ struct rte_lpm_rcu_config rcu_cfg = {0};
+
+ /* A single tbl8 group so that one deferred-free tbl8 exhausts the pool */
+ config.max_rules = MAX_RULES;
+ config.number_tbl8s = 1;
+ config.flags = 0;
+
+ lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
+ TEST_LPM_ASSERT(lpm != NULL);
+
+ /* Create RCU QSBR variable sized for a single reader thread */
+ sz = rte_rcu_qsbr_get_memsize(1);
+ qsv = (struct rte_rcu_qsbr *)rte_zmalloc_socket(NULL, sz,
+ RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ TEST_LPM_ASSERT(qsv != NULL);
+
+ status = rte_rcu_qsbr_init(qsv, 1);
+ TEST_LPM_ASSERT(status == 0);
+
+ rcu_cfg.v = qsv;
+ rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ;
+ /* Attach RCU QSBR to LPM table */
+ status = rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
+ TEST_LPM_ASSERT(status == 0);
+
+ /* depth > 24 occupies the only tbl8 group */
+ ip = RTE_IPV4(192, 0, 2, 100);
+ depth = 28;
+ next_hop = 1;
+ status = rte_lpm_add(lpm, ip, depth, next_hop);
+ TEST_LPM_ASSERT(status == 0);
+ TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
+
+ /* Register pseudo reader */
+ status = rte_rcu_qsbr_thread_register(qsv, 0);
+ TEST_LPM_ASSERT(status == 0);
+ rte_rcu_qsbr_thread_online(qsv, 0);
+
+ /* Reader sees the rule while online */
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == 0);
+ TEST_LPM_ASSERT(next_hop_return == next_hop);
+
+ /* Writer update */
+ status = rte_lpm_delete(lpm, ip, depth);
+ TEST_LPM_ASSERT(status == 0);
+ TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid);
+
+ /* Lookup must miss once the rule is deleted */
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status != 0);
+
+ /* Reader has not reported quiescence, so the freed tbl8 is still
+  * on the defer queue and this re-add must fail for lack of tbl8s.
+  */
+ status = rte_lpm_add(lpm, ip, depth, next_hop);
+ TEST_LPM_ASSERT(status != 0);
+
+ /* Reader quiescent */
+ rte_rcu_qsbr_quiescent(qsv, 0);
+
+ /* Defer queue can now be reclaimed, so the re-add succeeds */
+ status = rte_lpm_add(lpm, ip, depth, next_hop);
+ TEST_LPM_ASSERT(status == 0);
+
+ rte_rcu_qsbr_thread_offline(qsv, 0);
+ status = rte_rcu_qsbr_thread_unregister(qsv, 0);
+ TEST_LPM_ASSERT(status == 0);
+
+ /* Rule is visible again after the successful re-add */
+ status = rte_lpm_lookup(lpm, ip, &next_hop_return);
+ TEST_LPM_ASSERT(status == 0);
+ TEST_LPM_ASSERT(next_hop_return == next_hop);
+
+ rte_lpm_free(lpm);
+ rte_free(qsv);
+
+ return PASS;
+}
+
+/* Shared state for the multi-threaded RCU tests — presumably used by
+ * reader/writer thread functions later in this file (outside this chunk);
+ * confirm against the callers.
+ */
+static struct rte_lpm *g_lpm;
+static struct rte_rcu_qsbr *g_v;
+/* Prefix under test (TEST-NET-1 documentation range) */
+static uint32_t g_ip = RTE_IPV4(192, 0, 2, 100);
+/* NOTE(review): volatile (not atomic) stop flag — assumes a single writer
+ * thread sets it; confirm in the thread functions.
+ */
+static volatile uint8_t writer_done;
+/* Report quiescent state interval every 1024 lookups. Larger critical
+ * sections in reader will result in writer polling multiple times.
+ */
+#define QSBR_REPORTING_INTERVAL 1024
+#define WRITER_ITERATIONS 512