* All rights reserved.
*/
+#include <sys/queue.h>
+
#include <rte_malloc.h>
#include "ulp_tun.h"
goto err;
/* Store the tunnel dmac in the tunnel cache table and use it while
- * programming tunnel flow F2.
+ * programming tunnel inner flow.
*/
memcpy(tun_entry->t_dmac,
¶ms->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
RTE_ETHER_ADDR_LEN);
- tun_entry->valid = true;
tun_entry->tun_flow_info[params->port_id].state =
BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
tun_entry->outer_tun_flow_id = params->fid;
- /* F1 and it's related F2s are correlated based on
- * Tunnel Destination IP Address.
+ /* Tunnel outer flow and it's related inner flows are correlated
+ * based on Tunnel Destination IP Address.
*/
if (tun_entry->t_dst_ip_valid)
goto done;
{
struct bnxt_ulp_mapper_create_parms mparms = { 0 };
struct ulp_per_port_flow_info *flow_info;
- struct ulp_rte_parser_params *params;
+ struct ulp_rte_parser_params *inner_params;
int ret;
- /* F2 doesn't have tunnel dmac, use the tunnel dmac that was
- * stored during F1 programming.
+ /* Tunnel inner flow doesn't have tunnel dmac, use the tunnel
+ * dmac that was stored during F1 programming.
*/
flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
- params = &flow_info->first_inner_tun_params;
- memcpy(¶ms->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
- tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
- params->parent_fid = tun_entry->outer_tun_flow_id;
- params->fid = flow_info->first_tun_i_fid;
-
- bnxt_ulp_init_mapper_params(&mparms, params,
- BNXT_ULP_FDB_TYPE_REGULAR);
-
- ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
- if (ret)
- PMD_DRV_LOG(ERR, "Failed to create F2 flow.");
+ STAILQ_FOREACH(inner_params, &flow_info->tun_i_prms_list, next) {
+ memcpy(&inner_params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
+ tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
+ inner_params->parent_fid = tun_entry->outer_tun_flow_id;
+
+ bnxt_ulp_init_mapper_params(&mparms, inner_params,
+ BNXT_ULP_FDB_TYPE_REGULAR);
+
+ ret = ulp_mapper_flow_create(inner_params->ulp_ctx, &mparms);
+ if (ret)
+ PMD_DRV_LOG(ERR,
+ "Failed to create inner tun flow, FID:%u.",
+ inner_params->fid);
+ }
}
/* This function either install outer tunnel flow & inner tunnel flow
struct bnxt_tun_cache_entry *tun_entry,
uint16_t tun_idx)
{
- enum bnxt_ulp_tun_flow_state flow_state;
int ret;
- flow_state = tun_entry->tun_flow_info[params->port_id].state;
ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
if (ret == BNXT_TF_RC_ERROR) {
PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
return ret;
}
- /* If flow_state == BNXT_ULP_FLOW_STATE_NORMAL before installing
- * F1, that means F2 is not deferred. Hence, no need to install F2.
+ /* Install any cached tunnel inner flows that came before tunnel
+ * outer flow.
*/
- if (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)
- ulp_install_inner_tun_flow(tun_entry, params);
+ ulp_install_inner_tun_flow(tun_entry, params);
return BNXT_TF_RC_FID;
}
* outer tunnel flow request.
*/
static int32_t
-ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
+ulp_post_process_cache_inner_tun_flow(struct ulp_rte_parser_params *params,
struct bnxt_tun_cache_entry *tun_entry)
{
+ struct ulp_rte_parser_params *inner_tun_params;
struct ulp_per_port_flow_info *flow_info;
int ret;
if (ret != BNXT_TF_RC_SUCCESS)
return BNXT_TF_RC_ERROR;
- /* If Tunnel F2 flow comes first then we can't install it in the
- * hardware, because, F2 flow will not have L2 context information.
- * So, just cache the F2 information and program it in the context
- * of F1 flow installation.
+ /* If Tunnel inner flow comes first then we can't install it in the
+ * hardware, because, Tunnel inner flow will not have L2 context
+ * information. So, just cache the Tunnel inner flow information
+ * and program it in the context of F1 flow installation.
*/
flow_info = &tun_entry->tun_flow_info[params->port_id];
- memcpy(&flow_info->first_inner_tun_params, params,
- sizeof(struct ulp_rte_parser_params));
-
- flow_info->first_tun_i_fid = params->fid;
- flow_info->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;
+ inner_tun_params = rte_zmalloc("ulp_inner_tun_params",
+ sizeof(struct ulp_rte_parser_params), 0);
+ if (!inner_tun_params)
+ return BNXT_TF_RC_ERROR;
+ memcpy(inner_tun_params, params, sizeof(struct ulp_rte_parser_params));
+ STAILQ_INSERT_TAIL(&flow_info->tun_i_prms_list, inner_tun_params,
+ next);
+ flow_info->tun_i_cnt++;
- /* F1 and it's related F2s are correlated based on
+ /* F1 and it's related Tunnel inner flows are correlated based on
* Tunnel Destination IP Address. It could be already set, if
* the inner flow got offloaded first.
*/
int32_t
ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
{
- bool outer_tun_sig, inner_tun_sig, first_inner_tun_flow;
- bool outer_tun_reject, inner_tun_reject, outer_tun_flow, inner_tun_flow;
+ bool inner_tun_sig, cache_inner_tun_flow;
+ bool outer_tun_reject, outer_tun_flow, inner_tun_flow;
enum bnxt_ulp_tun_flow_state flow_state;
struct bnxt_tun_cache_entry *tun_entry;
uint32_t l3_tun, l3_tun_decap;
if (rc == BNXT_TF_RC_ERROR)
return rc;
+ if (params->port_id >= RTE_MAX_ETHPORTS)
+ return BNXT_TF_RC_ERROR;
flow_state = tun_entry->tun_flow_info[params->port_id].state;
/* Outer tunnel flow validation */
- outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
- outer_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);
+ outer_tun_flow = BNXT_OUTER_TUN_FLOW(l3_tun, params);
outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
- outer_tun_sig);
+ outer_tun_flow);
/* Inner tunnel flow validation */
inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
- first_inner_tun_flow = BNXT_FIRST_INNER_TUN_FLOW(flow_state,
+ cache_inner_tun_flow = BNXT_CACHE_INNER_TUN_FLOW(flow_state,
inner_tun_sig);
inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);
- inner_tun_reject = BNXT_REJECT_INNER_TUN_FLOW(flow_state,
- inner_tun_sig);
if (outer_tun_reject) {
tun_entry->outer_tun_rej_cnt++;
BNXT_TF_DBG(ERR,
"Tunnel F1 flow rejected, COUNT: %d\n",
tun_entry->outer_tun_rej_cnt);
- /* Inner tunnel flow is rejected if it comes between first inner
- * tunnel flow and outer flow requests.
- */
- } else if (inner_tun_reject) {
- tun_entry->inner_tun_rej_cnt++;
- BNXT_TF_DBG(ERR,
- "Tunnel F2 flow rejected, COUNT: %d\n",
- tun_entry->inner_tun_rej_cnt);
}
- if (outer_tun_reject || inner_tun_reject)
+ if (outer_tun_reject)
return BNXT_TF_RC_ERROR;
- else if (first_inner_tun_flow)
- return ulp_post_process_first_inner_tun_flow(params, tun_entry);
+ else if (cache_inner_tun_flow)
+ return ulp_post_process_cache_inner_tun_flow(params, tun_entry);
else if (outer_tun_flow)
return ulp_post_process_outer_tun_flow(params, tun_entry,
tun_idx);
return BNXT_TF_RC_NORMAL;
}
+/* Initialize the cached-inner-flow list head of every per-port slot
+ * in every tunnel cache entry.  Must run once before the tunnel cache
+ * is used: an all-zero STAILQ head (as left by static zero-init or
+ * memset) is not a valid empty list, so each head needs STAILQ_INIT.
+ */
+void
+ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl)
+{
+	struct ulp_per_port_flow_info *flow_info;
+	int i, j;
+
+	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
+		for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+			flow_info = &tun_tbl[i].tun_flow_info[j];
+			STAILQ_INIT(&flow_info->tun_i_prms_list);
+		}
+	}
+}
+
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
{
+	struct ulp_rte_parser_params *inner_params;
+	struct ulp_per_port_flow_info *flow_info;
+	int j;
+
+	/* Free every cached inner-flow params entry on every port.
+	 * Drain head-first: STAILQ_FOREACH must not be used to remove
+	 * and free the current element, because the macro advances via
+	 * the element's 'next' link after the body runs, which would
+	 * dereference freed memory (use-after-free).
+	 */
+	for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+		flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
+		while ((inner_params =
+			STAILQ_FIRST(&flow_info->tun_i_prms_list)) != NULL) {
+			STAILQ_REMOVE_HEAD(&flow_info->tun_i_prms_list, next);
+			rte_free(inner_params);
+		}
+	}
+
	memset(&tun_tbl[tun_idx], 0,
-	       sizeof(struct bnxt_tun_cache_entry));
+	       sizeof(struct bnxt_tun_cache_entry));
+
+	/* memset() zeroed the list heads, which is not a valid empty
+	 * STAILQ; re-initialize each port's list so the entry can be
+	 * reused.
+	 */
+	for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+		flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
+		STAILQ_INIT(&flow_info->tun_i_prms_list);
+	}
+}
+
+/* Search one port's cached inner-flow list for the flow with the
+ * given fid; if found, unlink and free it.  Returns true when the
+ * fid was found (caller can stop scanning other ports/entries).
+ */
+static bool
+ulp_chk_and_rem_tun_i_flow(struct bnxt_tun_cache_entry *tun_entry,
+			   struct ulp_per_port_flow_info *flow_info,
+			   uint32_t fid)
+{
+	struct ulp_rte_parser_params *inner_params;
+	int j;
+
+	/* FOREACH is safe here despite the remove/free: the function
+	 * returns immediately afterwards, so the iterator never
+	 * advances past the freed element.
+	 */
+	STAILQ_FOREACH(inner_params,
+		       &flow_info->tun_i_prms_list,
+		       next) {
+		if (inner_params->fid == fid) {
+			STAILQ_REMOVE(&flow_info->tun_i_prms_list,
+				      inner_params,
+				      ulp_rte_parser_params,
+				      next);
+			rte_free(inner_params);
+			flow_info->tun_i_cnt--;
+			/* When a dpdk application offloads a duplicate
+			 * tunnel inner flow on a port that it is not
+			 * destined to, there won't be a tunnel outer flow
+			 * associated with these duplicate tunnel inner flows.
+			 * So, when the last tunnel inner flow ages out, the
+			 * driver has to clear the tunnel entry, otherwise
+			 * the tunnel entry cannot be reused.
+			 */
+			if (!flow_info->tun_i_cnt &&
+			    flow_info->state != BNXT_ULP_FLOW_STATE_TUN_O_OFFLD) {
+				memset(tun_entry, 0,
+				       sizeof(struct bnxt_tun_cache_entry));
+				/* Re-init every port's list head, not the
+				 * same one repeatedly: the memset above
+				 * zeroed all RTE_MAX_ETHPORTS heads and a
+				 * zeroed STAILQ head is invalid.
+				 */
+				for (j = 0; j < RTE_MAX_ETHPORTS; j++)
+					STAILQ_INIT(&tun_entry->tun_flow_info[j].tun_i_prms_list);
+			}
+			return true;
+		}
+	}
+
+	return false;
+}
/* When a dpdk application offloads the same tunnel inner flow
struct ulp_per_port_flow_info *flow_info;
int i, j;
- for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES ; i++) {
+ for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
+ if (!tun_tbl[i].t_dst_ip_valid)
+ continue;
for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
flow_info = &tun_tbl[i].tun_flow_info[j];
- if (flow_info->first_tun_i_fid == fid &&
- flow_info->state == BNXT_ULP_FLOW_STATE_TUN_I_CACHED)
- memset(flow_info, 0, sizeof(*flow_info));
+ if (ulp_chk_and_rem_tun_i_flow(&tun_tbl[i],
+ flow_info, fid) == true)
+ return;
}
}
}
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
-#define BNXT_OUTER_TUN_SIGNATURE(l3_tun, params) \
+#define BNXT_OUTER_TUN_FLOW(l3_tun, params) \
((l3_tun) && \
ULP_BITMAP_ISSET((params)->act_bitmap.bits, \
BNXT_ULP_ACTION_BIT_JUMP))
!ULP_BITMAP_ISSET((params)->hdr_bitmap.bits, \
BNXT_ULP_HDR_BIT_O_ETH))
-#define BNXT_FIRST_INNER_TUN_FLOW(state, inner_tun_sig) \
+#define BNXT_CACHE_INNER_TUN_FLOW(state, inner_tun_sig) \
((state) == BNXT_ULP_FLOW_STATE_NORMAL && (inner_tun_sig))
#define BNXT_INNER_TUN_FLOW(state, inner_tun_sig) \
((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (inner_tun_sig))
-#define BNXT_OUTER_TUN_FLOW(outer_tun_sig) ((outer_tun_sig))
/* It is invalid to get another outer flow offload request
* for the same tunnel, while the outer flow is already offloaded.
*/
#define BNXT_REJECT_OUTER_TUN_FLOW(state, outer_tun_sig) \
((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (outer_tun_sig))
-/* It is invalid to get another inner flow offload request
- * for the same tunnel, while the outer flow is not yet offloaded.
- */
-#define BNXT_REJECT_INNER_TUN_FLOW(state, inner_tun_sig) \
- ((state) == BNXT_ULP_FLOW_STATE_TUN_I_CACHED && (inner_tun_sig))
#define ULP_TUN_O_DMAC_HDR_FIELD_INDEX 1
#define ULP_TUN_O_IPV4_DIP_INDEX 19
* requests arrive.
*
* If inner tunnel flow offload request arrives first then the flow
- * state will change from BNXT_ULP_FLOW_STATE_NORMAL to
- * BNXT_ULP_FLOW_STATE_TUN_I_CACHED and the following outer tunnel
- * flow offload request will change the state of the flow to
- * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD from BNXT_ULP_FLOW_STATE_TUN_I_CACHED.
+ * state will remain in BNXT_ULP_FLOW_STATE_NORMAL state.
+ * The following outer tunnel flow offload request will change the
+ * state of the flow to BNXT_ULP_FLOW_STATE_TUN_O_OFFLD from
+ * BNXT_ULP_FLOW_STATE_NORMAL.
*
* If outer tunnel flow offload request arrives first then the flow state
* will change from BNXT_ULP_FLOW_STATE_NORMAL to
enum bnxt_ulp_tun_flow_state {
BNXT_ULP_FLOW_STATE_NORMAL = 0,
BNXT_ULP_FLOW_STATE_TUN_O_OFFLD,
- BNXT_ULP_FLOW_STATE_TUN_I_CACHED
};
struct ulp_per_port_flow_info {
-	enum bnxt_ulp_tun_flow_state state;
-	uint32_t first_tun_i_fid;
-	struct ulp_rte_parser_params first_inner_tun_params;
+	/* Per-port position in the tunnel offload state machine. */
+	enum bnxt_ulp_tun_flow_state state;
+	/* Number of tunnel inner flows currently cached on this port. */
+	uint32_t tun_i_cnt;
+	/* Inner-flow parser params cached until the outer flow is
+	 * offloaded.  NOTE(review): STAILQ_HEAD requires <sys/queue.h>;
+	 * the patch adds it to the .c file — confirm this header also
+	 * has it in scope for all includers.
+	 */
+	STAILQ_HEAD(, ulp_rte_parser_params) tun_i_prms_list;
};
struct bnxt_tun_cache_entry {
- bool valid;
bool t_dst_ip_valid;
uint8_t t_dmac[RTE_ETHER_ADDR_LEN];
union {
};
uint32_t outer_tun_flow_id;
uint16_t outer_tun_rej_cnt;
- uint16_t inner_tun_rej_cnt;
struct ulp_per_port_flow_info tun_flow_info[RTE_MAX_ETHPORTS];
};
+void
+ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl);
+
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx);