X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_lpm%2Frte_lpm.c;h=126fc5a82dbd41c8419d3d76103f65bc3993a5e1;hb=8a9f8564e9f916d469492f5c5a4dc5886e814e05;hp=38ab512a42c3c25875f9205a6a359db1bee32da2;hpb=e0a439466b9d63e3a5cc063efbf08784300b54c4;p=dpdk.git

diff --git a/lib/librte_lpm/rte_lpm.c b/lib/librte_lpm/rte_lpm.c
index 38ab512a42..126fc5a82d 100644
--- a/lib/librte_lpm/rte_lpm.c
+++ b/lib/librte_lpm/rte_lpm.c
@@ -1,5 +1,6 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  * Copyright(c) 2010-2014 Intel Corporation
+ * Copyright(c) 2020 Arm Limited
  */
 
 #include 
@@ -39,6 +40,17 @@ enum valid_flag {
 	VALID
 };
 
+/** @internal LPM structure. */
+struct __rte_lpm {
+	/* LPM metadata. */
+	struct rte_lpm lpm;
+
+	/* RCU config. */
+	struct rte_rcu_qsbr *v;		/* RCU QSBR variable. */
+	enum rte_lpm_qsbr_mode rcu_mode;/* Blocking, defer queue. */
+	struct rte_rcu_qsbr_dq *dq;	/* RCU QSBR defer queue. */
+};
+
 /* Macro to enable/disable run-time checks. */
 #if defined(RTE_LIBRTE_LPM_DEBUG)
 #include 
@@ -122,6 +134,7 @@ rte_lpm_create(const char *name, int socket_id,
 		const struct rte_lpm_config *config)
 {
 	char mem_name[RTE_LPM_NAMESIZE];
+	struct __rte_lpm *internal_lpm;
 	struct rte_lpm *lpm = NULL;
 	struct rte_tailq_entry *te;
 	uint32_t mem_size, rules_size, tbl8s_size;
@@ -140,12 +153,6 @@ rte_lpm_create(const char *name, int socket_id,
 
 	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
 
-	/* Determine the amount of memory to allocate. */
-	mem_size = sizeof(*lpm);
-	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
-	tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
-			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
-
 	rte_mcfg_tailq_write_lock();
 
 	/* guarantee there's no existing */
@@ -161,6 +168,12 @@ rte_lpm_create(const char *name, int socket_id,
 		goto exit;
 	}
 
+	/* Determine the amount of memory to allocate. */
+	mem_size = sizeof(*internal_lpm);
+	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
+	tbl8s_size = sizeof(struct rte_lpm_tbl_entry) *
+			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s;
+
 	/* allocate tailq entry */
 	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
 	if (te == NULL) {
@@ -170,21 +183,23 @@ rte_lpm_create(const char *name, int socket_id,
 	}
 
 	/* Allocate memory to store the LPM data structures. */
-	lpm = rte_zmalloc_socket(mem_name, mem_size,
+	internal_lpm = rte_zmalloc_socket(mem_name, mem_size,
 			RTE_CACHE_LINE_SIZE, socket_id);
-	if (lpm == NULL) {
+	if (internal_lpm == NULL) {
 		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
 		rte_free(te);
 		rte_errno = ENOMEM;
 		goto exit;
 	}
 
+	lpm = &internal_lpm->lpm;
 	lpm->rules_tbl = rte_zmalloc_socket(NULL,
 			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
 
 	if (lpm->rules_tbl == NULL) {
 		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
-		rte_free(lpm);
+		rte_free(internal_lpm);
+		internal_lpm = NULL;
 		lpm = NULL;
 		rte_free(te);
 		rte_errno = ENOMEM;
@@ -197,7 +212,8 @@ rte_lpm_create(const char *name, int socket_id,
 	if (lpm->tbl8 == NULL) {
 		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
 		rte_free(lpm->rules_tbl);
-		rte_free(lpm);
+		rte_free(internal_lpm);
+		internal_lpm = NULL;
 		lpm = NULL;
 		rte_free(te);
 		rte_errno = ENOMEM;
@@ -225,6 +241,7 @@ exit:
 void
 rte_lpm_free(struct rte_lpm *lpm)
 {
+	struct __rte_lpm *internal_lpm;
 	struct rte_lpm_list *lpm_list;
 	struct rte_tailq_entry *te;
 
@@ -246,12 +263,84 @@ rte_lpm_free(struct rte_lpm *lpm)
 
 	rte_mcfg_tailq_write_unlock();
 
+	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
+	if (internal_lpm->dq != NULL)
+		rte_rcu_qsbr_dq_delete(internal_lpm->dq);
 	rte_free(lpm->tbl8);
 	rte_free(lpm->rules_tbl);
 	rte_free(lpm);
 	rte_free(te);
 }
 
+static void
+__lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
+{
+	struct rte_lpm_tbl_entry *tbl8 = ((struct rte_lpm *)p)->tbl8;
+	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
+	uint32_t tbl8_group_index = *(uint32_t *)data;
+
+	RTE_SET_USED(n);
+	/* Set tbl8 group invalid */
+	__atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
+		__ATOMIC_RELAXED);
+}
+
+/* Associate QSBR variable with an LPM object.
+ */
+int
+rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg,
+	struct rte_rcu_qsbr_dq **dq)
+{
+	struct rte_rcu_qsbr_dq_parameters params = {0};
+	char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
+	struct __rte_lpm *internal_lpm;
+
+	if (lpm == NULL || cfg == NULL) {
+		rte_errno = EINVAL;
+		return 1;
+	}
+
+	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
+	if (internal_lpm->v != NULL) {
+		rte_errno = EEXIST;
+		return 1;
+	}
+
+	if (cfg->mode == RTE_LPM_QSBR_MODE_SYNC) {
+		/* No other things to do. */
+	} else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
+		/* Init QSBR defer queue. */
+		snprintf(rcu_dq_name, sizeof(rcu_dq_name),
+				"LPM_RCU_%s", lpm->name);
+		params.name = rcu_dq_name;
+		params.size = cfg->dq_size;
+		if (params.size == 0)
+			params.size = lpm->number_tbl8s;
+		params.trigger_reclaim_limit = cfg->reclaim_thd;
+		params.max_reclaim_size = cfg->reclaim_max;
+		if (params.max_reclaim_size == 0)
+			params.max_reclaim_size = RTE_LPM_RCU_DQ_RECLAIM_MAX;
+		params.esize = sizeof(uint32_t);	/* tbl8 group index */
+		params.free_fn = __lpm_rcu_qsbr_free_resource;
+		params.p = lpm;
+		params.v = cfg->v;
+		internal_lpm->dq = rte_rcu_qsbr_dq_create(&params);
+		if (internal_lpm->dq == NULL) {
+			RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
+			return 1;
+		}
+		if (dq != NULL)
+			*dq = internal_lpm->dq;
+	} else {
+		rte_errno = EINVAL;
+		return 1;
+	}
+	internal_lpm->rcu_mode = cfg->mode;
+	internal_lpm->v = cfg->v;
+
+	return 0;
+}
+
 /*
  * Adds a rule to the rule table.
  *
@@ -394,14 +483,15 @@ rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
  * Find, clean and allocate a tbl8.
  */
 static int32_t
-tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
+_tbl8_alloc(struct rte_lpm *lpm)
 {
 	uint32_t group_idx; /* tbl8 group index. */
 	struct rte_lpm_tbl_entry *tbl8_entry;
 
 	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
-	for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
-		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
+	for (group_idx = 0; group_idx < lpm->number_tbl8s; group_idx++) {
+		tbl8_entry = &lpm->tbl8[group_idx *
+					RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
 		/* If a free tbl8 group is found clean it and set as VALID. */
 		if (!tbl8_entry->valid_group) {
 			struct rte_lpm_tbl_entry new_tbl8_entry = {
@@ -427,14 +517,47 @@ tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
 	return -ENOSPC;
 }
 
+static int32_t
+tbl8_alloc(struct rte_lpm *lpm)
+{
+	int32_t group_idx; /* tbl8 group index. */
+	struct __rte_lpm *internal_lpm;
+
+	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
+	group_idx = _tbl8_alloc(lpm);
+	if (group_idx == -ENOSPC && internal_lpm->dq != NULL) {
+		/* If there are no tbl8 groups try to reclaim one. */
+		if (rte_rcu_qsbr_dq_reclaim(internal_lpm->dq, 1,
+				NULL, NULL, NULL) == 0)
+			group_idx = _tbl8_alloc(lpm);
+	}
+
+	return group_idx;
+}
+
 static void
-tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
+tbl8_free(struct rte_lpm *lpm, uint32_t tbl8_group_start)
 {
-	/* Set tbl8 group invalid*/
 	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
+	struct __rte_lpm *internal_lpm;
 
-	__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
-			__ATOMIC_RELAXED);
+	internal_lpm = container_of(lpm, struct __rte_lpm, lpm);
+	if (internal_lpm->v == NULL) {
+		/* Set tbl8 group invalid*/
+		__atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
+				__ATOMIC_RELAXED);
+	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
+		/* Wait for quiescent state change. */
+		rte_rcu_qsbr_synchronize(internal_lpm->v,
+				RTE_QSBR_THRID_INVALID);
+		/* Set tbl8 group invalid*/
+		__atomic_store(&lpm->tbl8[tbl8_group_start], &zero_tbl8_entry,
+				__ATOMIC_RELAXED);
+	} else if (internal_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
+		/* Push into QSBR defer queue. */
+		rte_rcu_qsbr_dq_enqueue(internal_lpm->dq,
+				(void *)&tbl8_group_start);
+	}
 }
 
 static __rte_noinline int32_t
@@ -523,7 +646,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 
 	if (!lpm->tbl24[tbl24_index].valid) {
 		/* Search for a free tbl8 group. */
-		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
+		tbl8_group_index = tbl8_alloc(lpm);
 
 		/* Check tbl8 allocation was successful. */
 		if (tbl8_group_index < 0) {
@@ -569,7 +692,7 @@ add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
 	}
 	/* If valid entry but not extended calculate the index into Table8. */
 	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
 		/* Search for free tbl8 group. */
-		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);
+		tbl8_group_index = tbl8_alloc(lpm);
 
 		if (tbl8_group_index < 0) {
 			return tbl8_group_index;
@@ -977,7 +1100,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		 */
 		lpm->tbl24[tbl24_index].valid = 0;
 		__atomic_thread_fence(__ATOMIC_RELEASE);
-		tbl8_free(lpm->tbl8, tbl8_group_start);
+		tbl8_free(lpm, tbl8_group_start);
 	} else if (tbl8_recycle_index > -1) {
 		/* Update tbl24 entry. */
 		struct rte_lpm_tbl_entry new_tbl24_entry = {
@@ -993,7 +1116,7 @@ delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
 		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
 				__ATOMIC_RELAXED);
 		__atomic_thread_fence(__ATOMIC_RELEASE);
-		tbl8_free(lpm->tbl8, tbl8_group_start);
+		tbl8_free(lpm, tbl8_group_start);
 	}
 #undef group_idx
 	return 0;
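
Usage sketch (not part of the patch): the snippet below shows how an application might attach an RCU QSBR variable to an LPM table through the rte_lpm_rcu_qsbr_add() call added above, using the three-argument signature and the rte_lpm_rcu_config fields (v, mode, dq_size, reclaim_thd, reclaim_max) as they appear in this diff. The helper name create_lpm_with_rcu(), NUM_READERS, "example_lpm" and the sizing values are illustrative assumptions, and error handling is trimmed for brevity.

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_rcu_qsbr.h>
#include <rte_lpm.h>

#define NUM_READERS 4	/* illustrative number of lookup (reader) threads */

/* Hypothetical helper, not a DPDK API. */
static struct rte_lpm *
create_lpm_with_rcu(void)
{
	struct rte_lpm_config lpm_cfg = {
		.max_rules = 1024,	/* example sizing only */
		.number_tbl8s = 256,
		.flags = 0,
	};
	struct rte_lpm_rcu_config rcu_cfg = {0};
	struct rte_rcu_qsbr *qsv = NULL;
	struct rte_lpm *lpm;
	size_t sz;

	lpm = rte_lpm_create("example_lpm", SOCKET_ID_ANY, &lpm_cfg);
	if (lpm == NULL)
		return NULL;

	/* QSBR variable shared with the readers; it must stay valid for
	 * the lifetime of the LPM table. */
	sz = rte_rcu_qsbr_get_memsize(NUM_READERS);
	qsv = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (qsv == NULL || rte_rcu_qsbr_init(qsv, NUM_READERS) != 0)
		goto fail;

	/* Defer-queue mode: freed tbl8 groups are queued and reclaimed
	 * later. Zero sizes fall back to the defaults picked inside
	 * rte_lpm_rcu_qsbr_add() (number_tbl8s, RTE_LPM_RCU_DQ_RECLAIM_MAX). */
	rcu_cfg.v = qsv;
	rcu_cfg.mode = RTE_LPM_QSBR_MODE_DQ;
	rcu_cfg.dq_size = 0;
	rcu_cfg.reclaim_thd = 0;
	rcu_cfg.reclaim_max = 0;

	/* The third argument can return the defer-queue handle; it is not
	 * needed here. */
	if (rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg, NULL) != 0)
		goto fail;

	return lpm;

fail:
	rte_free(qsv);
	rte_lpm_free(lpm);
	return NULL;
}

Each reader thread is then expected to register against the same QSBR variable (rte_rcu_qsbr_thread_register() and rte_rcu_qsbr_thread_online()) and to report quiescent states with rte_rcu_qsbr_quiescent() between lookups. With RTE_LPM_QSBR_MODE_DQ, tbl8_free() only enqueues the freed group and reclamation happens once the registered readers have quiesced; RTE_LPM_QSBR_MODE_SYNC instead blocks the writer in rte_rcu_qsbr_synchronize() before invalidating the group, as shown in the tbl8_free() changes above.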