/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2020 Arm Limited
*/
#include <string.h>
VALID
};
+/** @internal LPM structure. */
+struct __rte_lpm {
+ /* LPM metadata; must remain the first member, since rte_lpm_free()
+ * frees the user-visible rte_lpm pointer directly.
+ */
+ struct rte_lpm lpm;
+
+ /* RCU config. */
+ struct rte_rcu_qsbr *v; /* RCU QSBR variable. */
+ enum rte_lpm_qsbr_mode rcu_mode; /* Reclamation mode: blocking or defer queue. */
+ struct rte_rcu_qsbr_dq *dq; /* RCU QSBR defer queue. */
+};
+
/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
const struct rte_lpm_config *config)
{
char mem_name[RTE_LPM_NAMESIZE];
+ struct __rte_lpm *internal_lpm;
struct rte_lpm *lpm = NULL;
struct rte_tailq_entry *te;
uint32_t mem_size, rules_size, tbl8s_size;
snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
- /* Determine the amount of memory to allocate. */
- mem_size = sizeof(*lpm);
- rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
- tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
- RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
-
rte_mcfg_tailq_write_lock();
/* Guarantee there's no existing entry with the same name. */
goto exit;
}
+ /* Determine the amount of memory to allocate. */
+ mem_size = sizeof(*internal_lpm);
+ rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
+ tbl8s_size = sizeof(struct rte_lpm_tbl_entry) *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s;
+
/* allocate tailq entry */
te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
if (te == NULL) {
}
/* Allocate memory to store the LPM data structures. */
- lpm = rte_zmalloc_socket(mem_name, mem_size,
+ internal_lpm = rte_zmalloc_socket(mem_name, mem_size,
RTE_CACHE_LINE_SIZE, socket_id);
- if (lpm == NULL) {
+ if (internal_lpm == NULL) {
RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
rte_free(te);
rte_errno = ENOMEM;
goto exit;
}
+ lpm = &internal_lpm->lpm;
lpm->rules_tbl = rte_zmalloc_socket(NULL,
(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
if (lpm->rules_tbl == NULL) {
RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
- rte_free(lpm);
+ rte_free(internal_lpm);
+ internal_lpm = NULL;
lpm = NULL;
rte_free(te);
rte_errno = ENOMEM;
if (lpm->tbl8 == NULL) {
RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
rte_free(lpm->rules_tbl);
- rte_free(lpm);
+ rte_free(internal_lpm);
+ internal_lpm = NULL;
lpm = NULL;
rte_free(te);
rte_errno = ENOMEM;
void
rte_lpm_free(struct rte_lpm *lpm)
{
+ struct __rte_lpm *internal_lpm;
struct rte_lpm_list *lpm_list;
struct rte_tailq_entry *te;
rte_mcfg_tailq_write_unlock();
+ internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
+ if (internal_lpm->dq != NULL)
+ rte_rcu_qsbr_dq_delete(internal_lpm->dq);
rte_free(lpm->tbl8);
rte_free(lpm->rules_tbl);
rte_free(lpm);
rte_free(te);
}
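+/* RCU QSBR defer queue reclaim callback: invalidate the tbl8 group
+ * whose index was enqueued, making the group available for reuse.
+ */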
+static void
+__lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
+{
+ struct rte_lpm_tbl_entry *tbl8 = ((struct rte_lpm *)p)->tbl8;
+ struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
+ uint32_t tbl8_group_index = *(uint32_t *)data;
+
+ RTE_SET_USED(n);
+ /* Set tbl8 group invalid */
+ __atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
+ __ATOMIC_RELAXED);
+}
+
+/* Associate QSBR variable with an LPM object.
+ */
+int
+rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
+ struct rte_rcu_qsbr_dq **dq)
+{
+ struct rte_rcu_qsbr_dq_parameters params = {0};
+ char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
+ struct __rte_lpm *internal_lpm;
+
+ if (lpm == NULL || cfg == NULL) {
+ rte_errno = EINVAL;
+ return 1;
+ }
+
+ internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
+ if (internal_lpm->v != NULL) {
+ rte_errno = EEXIST;
+ return 1;
+ }
+
+ if (cfg->mode == RTE_LPM_QSBR_MODE_SYNC) {
+ /* Nothing else to do. */
+ } else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
+ /* Init QSBR defer queue. */
+ snprintf(rcu_dq_name, sizeof(rcu_dq_name),
+ "LPM_RCU_%s", lpm->name);
+ params.name = rcu_dq_name;
+ params.size = cfg->dq_size;
+ if (params.size == 0)
+ params.size = lpm->number_tbl8s;
+ params.trigger_reclaim_limit = cfg->reclaim_thd;
+ params.max_reclaim_size = cfg->reclaim_max;
+ if (params.max_reclaim_size == 0)
+ params.max_reclaim_size = RTE_LPM_RCU_DQ_RECLAIM_MAX;
+ params.esize = sizeof(uint32_t); /* tbl8 group index */
+ params.free_fn = __lpm_rcu_qsbr_free_resource;
+ params.p = lpm;
+ params.v = cfg->v;
+ internal_lpm->dq = rte_rcu_qsbr_dq_create(&params);
+ if (internal_lpm->dq == NULL) {
+ RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
+ return 1;
+ }
+ if (dq != NULL)
+ *dq = internal_lpm->dq;
+ } else {
+ rte_errno = EINVAL;
+ return 1;
+ }
+ internal_lpm->rcu_mode = cfg->mode;
+ internal_lpm->v = cfg->v;
+
+ return 0;
+}
+
/*
* Adds a rule to the rule table.
*
* Find, clean and allocate a tbl8.
*/
static int32_t
-tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
+_tbl8_alloc(struct rte_lpm *lpm)
{
uint32_t group_idx; /* tbl8 group index. */
struct rte_lpm_tbl_entry *tbl8_entry;
/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
- for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
- tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+ for (group_idx = 0; group_idx < lpm->number_tbl8s; group_idx++) {
+ tbl8_entry = &lpm->tbl8[group_idx *
+ RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
/* If a free tbl8 group is found clean it and set as VALID. */
if (!tbl8_entry->valid_group) {
struct rte_lpm_tbl_entry new_tbl8_entry = {
return -ENOSPC;
}
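+/* Allocate a tbl8 group. If all groups are in use and a defer queue
+ * is attached, reclaim one entry from the queue and retry.
+ */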
+static int32_t
+tbl8_alloc(struct rte_lpm *lpm)
+{
+ int32_t group_idx; /* tbl8 group index. */
+ struct __rte_lpm *internal_lpm;
+
+ internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
+ group_idx = _tbl8_alloc(lpm);
+ if (group_idx == -ENOSPC && internal_lpm->dq != NULL) {
+ /* If there are no tbl8 groups try to reclaim one. */
+ if (rte_rcu_qsbr_dq_reclaim(internal_lpm->dq, 1,
+ NULL, NULL, NULL) == 0)
+ group_idx = _tbl8_alloc(lpm);
+ }
+
+ return group_idx;
+}
+
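+/* Free a tbl8 group: immediately when no RCU variable is attached,
+ * after readers report a quiescent state in SYNC mode, or via the
+ * defer queue in DQ mode.
+ */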
static void
-tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
{
- /* Set tbl8 group invalid*/
struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
+ struct __rte_lpm *internal_lpm;
- __atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
- __ATOMIC_RELAXED);
+ internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
+ if (internal_lpm->v == NULL) {
+ /* Set tbl8 group invalid. */
+ __atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __ATOMIC_RELAXED);
+ } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
+ /* Wait for quiescent state change. */
+ rte_rcu_qsbr_synchronize(internal_lpm->v,
+ RTE_QSBR_THRID_INVALID);
+ /* Set tbl8 group invalid. */
+ __atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
+ __ATOMIC_RELAXED);
+ } else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
+ /* Push into QSBR defer queue. */
+ rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
+ (void *)&tbl8_group_start);
+ }
}
static __rte_noinline int32_t
if (!lpm->tbl24[tbl24_index].valid) {
/* Search for a free tbl8 group. */
- tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
+ tbl8_group_index = tbl8_alloc(lpm);
/* Check tbl8 allocation was successful. */
if (tbl8_group_index < 0) {
} /* If valid entry but not extended calculate the index into Table8. */
else if (lpm->tbl24[tbl24_index].valid_group == 0) {
/* Search for free tbl8 group. */
- tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
+ tbl8_group_index = tbl8_alloc(lpm);
if (tbl8_group_index < 0) {
return tbl8_group_index;
*/
lpm->tbl24[tbl24_index].valid = 0;
__atomic_thread_fence(__ATOMIC_RELEASE);
- tbl8_free(lpm->tbl8, tbl8_group_start);
+ tbl8_free(lpm, tbl8_group_start);
} else if (tbl8_recycle_index > -1) {
/* Update tbl24 entry. */
struct rte_lpm_tbl_entry new_tbl24_entry = {
__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
__ATOMIC_RELAXED);
__atomic_thread_fence(__ATOMIC_RELEASE);
- tbl8_free(lpm->tbl8, tbl8_group_start);
+ tbl8_free(lpm, tbl8_group_start);
}
#undef group_idx
return 0;
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2020 Arm Limited
*/
#ifndef _RTE_LPM_H_
#include <rte_memory.h>
#include <rte_common.h>
#include <rte_vect.h>
+#include <rte_rcu_qsbr.h>
#ifdef __cplusplus
extern "C" {
/** Bitmask used to indicate successful lookup */
#define RTE_LPM_LOOKUP_SUCCESS 0x01000000
+/** @internal Default number of RCU defer queue entries to reclaim in one go. */
+#define RTE_LPM_RCU_DQ_RECLAIM_MAX 16
+
+/** RCU reclamation modes */
+enum rte_lpm_qsbr_mode {
+ /** Create defer queue for reclaim. */
+ RTE_LPM_QSBR_MODE_DQ = 0,
+ /** Use blocking mode reclaim. No defer queue created. */
+ RTE_LPM_QSBR_MODE_SYNC
+};
+
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/** @internal Tbl24 entry structure. */
__extension__
struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
};
+/** LPM RCU QSBR configuration structure. */
+struct rte_lpm_rcu_config {
+ struct rte_rcu_qsbr *v; /**< RCU QSBR variable. */
+ /** Mode of RCU QSBR (RTE_LPM_QSBR_MODE_xxx).
+ * '0' for default: create a defer queue for reclamation.
+ */
+ enum rte_lpm_qsbr_mode mode;
+ /** RCU defer queue size. Default: lpm->number_tbl8s. */
+ uint32_t dq_size;
+ uint32_t reclaim_thd; /**< Threshold to trigger auto reclaim. */
+ /** Max entries to reclaim in one go.
+ * Default: RTE_LPM_RCU_DQ_RECLAIM_MAX.
+ */
+ uint32_t reclaim_max;
+};
+
/**
* Create an LPM object.
*
void
rte_lpm_free(struct rte_lpm *lpm);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice
+ *
+ * Associate RCU QSBR variable with an LPM object.
+ *
+ * @param lpm
+ * The LPM object to attach the RCU QSBR variable to.
+ * @param cfg
+ * RCU QSBR configuration.
+ * @param dq
+ * Location to store the handle of the created RCU QSBR defer queue
+ * (created only in RTE_LPM_QSBR_MODE_DQ mode); may be NULL.
+ * @return
+ * On success - 0
+ * On error - 1 with error code set in rte_errno.
+ * Possible rte_errno codes are:
+ * - EINVAL - invalid pointer
+ * - EEXIST - already added QSBR
+ * - ENOMEM - memory allocation failure
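+ *
+ * A minimal usage sketch; the QSBR variable "qsbr_var" and the LPM
+ * object "lpm" are assumed to have been created and initialized
+ * elsewhere:
+ *
+ * @code{.c}
+ * struct rte_lpm_rcu_config rcu_cfg = {0};
+ *
+ * rcu_cfg.v = qsbr_var;                // registered RCU QSBR variable
+ * rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ; // reclaim through a defer queue
+ * // dq_size and reclaim_max stay 0 to use the documented defaults
+ * if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg, NULL) != 0)
+ *     // rte_errno holds the failure reason
+ *     rte_panic("failed to attach RCU QSBR to LPM\n");
+ * @endcode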
+ */
+__rte_experimental
+int rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
+ struct rte_rcu_qsbr_dq **dq);
+
/**
* Add a rule to the LPM table.
*