enum bnxt_ulp_fdb_type flow_type,
uint32_t fid)
{
- struct bnxt_ulp_flow_db *flow_db;
+ struct bnxt_tun_cache_entry *tun_tbl;
struct bnxt_ulp_flow_tbl *flow_tbl;
+ struct bnxt_ulp_flow_db *flow_db;
flow_db = bnxt_ulp_cntxt_ptr2_flow_db_get(ulp_ctxt);
if (!flow_db)
return -EINVAL;

if (flow_type == BNXT_ULP_FDB_TYPE_REGULAR)
ulp_flow_db_func_id_set(flow_db, fid, 0);
+ tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(ulp_ctxt);
+ if (!tun_tbl)
+ return -EINVAL;
+
+ ulp_clear_tun_inner_entry(tun_tbl, fid);
+
/* all good, return success */
return 0;
}
*/
int32_t
ulp_flow_db_parent_flow_count_get(struct bnxt_ulp_context *ulp_ctxt,
- uint32_t parent_fid,
- uint64_t *packet_count,
- uint64_t *byte_count)
+ uint32_t parent_fid, uint64_t *packet_count,
+ uint64_t *byte_count, uint8_t count_reset)
{
struct bnxt_ulp_flow_db *flow_db;
struct ulp_fdb_parent_child_db *p_pdb;
*packet_count =
p_pdb->parent_flow_tbl[idx].pkt_count;
*byte_count =
p_pdb->parent_flow_tbl[idx].byte_count;
+ if (count_reset) {
+ p_pdb->parent_flow_tbl[idx].pkt_count = 0;
+ p_pdb->parent_flow_tbl[idx].byte_count = 0;
+ }
}
return 0;
}
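
The added count_reset argument turns the parent-flow counters into optional read-and-clear statistics. A minimal caller sketch, assuming the counters are reported through the standard rte_flow COUNT query; the wrapper name and error handling are illustrative only, not part of this patch, and the ULP declarations come from the driver's own headers:

#include <stdint.h>
#include <rte_flow.h>

/* Hypothetical wrapper: report the parent flow's accumulated counters
 * through struct rte_flow_query_count, clearing them when the caller
 * requested a reset. Only the count_reset plumbing mirrors the patch.
 */
static int
query_parent_flow_counters(struct bnxt_ulp_context *ulp_ctxt,
			   uint32_t parent_fid,
			   struct rte_flow_query_count *qc)
{
	uint64_t packets = 0, bytes = 0;
	int32_t rc;

	rc = ulp_flow_db_parent_flow_count_get(ulp_ctxt, parent_fid,
					       &packets, &bytes,
					       qc->reset /* count_reset */);
	if (rc)
		return rc;

	qc->hits_set = 1;
	qc->bytes_set = 1;
	qc->hits = packets;
	qc->bytes = bytes;
	return 0;
}
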
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
}
+ /* Some of the PMD applications may set the protocol field
+ * in the IPv4 spec but don't set the mask. So, consider
+ * the mask in the proto value calculation.
+ */
+ if (ipv4_mask)
+ proto &= ipv4_mask->hdr.next_proto_id;
+
/* Update the field protocol hdr bitmap */
ulp_rte_l3_proto_type_update(params, proto, inner_flag);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
}
+ /* Some of the PMD applications may set the protocol field
+ * in the IPv6 spec but don't set the mask. So, consider
+ * the mask in proto value calculation.
+ */
+ if (ipv6_mask)
+ proto &= ipv6_mask->hdr.proto;
+
/* Update the field protocol hdr bitmap */
ulp_rte_l3_proto_type_update(params, proto, inner_flag);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
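
Both hunks fold the item mask into the protocol value because rte_flow treats an unmasked spec field as "don't care". A standalone sketch of the same calculation for IPv4, using only the public rte_flow item definitions; the helper is an illustration, not driver code:

#include <stdint.h>
#include <rte_flow.h>

/* An application may leave a value in spec->hdr.next_proto_id while
 * zeroing that field in the mask; the rule then does not match on the
 * protocol, so the parser must not infer an L4 header from it.
 */
static uint8_t
effective_ipv4_proto(const struct rte_flow_item_ipv4 *spec,
		     const struct rte_flow_item_ipv4 *mask)
{
	uint8_t proto = spec ? spec->hdr.next_proto_id : 0;

	if (mask)
		proto &= mask->hdr.next_proto_id;
	return proto;
}
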
RTE_ETHER_ADDR_LEN);
tun_entry->valid = true;
- tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
+ tun_entry->tun_flow_info[params->port_id].state =
+ BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
tun_entry->outer_tun_flow_id = params->fid;
/* F1 and its related F2s are correlated based on
/* This function programs the inner tunnel flow in the hardware. */
static void
-ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry)
+ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
+ struct ulp_rte_parser_params *tun_o_params)
{
struct bnxt_ulp_mapper_create_parms mparms = { 0 };
+ struct ulp_per_port_flow_info *flow_info;
struct ulp_rte_parser_params *params;
int ret;
/* F2 doesn't have tunnel dmac, use the tunnel dmac that was
* stored during F1 programming.
*/
- params = &tun_entry->first_inner_tun_params;
+ flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
+ params = &flow_info->first_inner_tun_params;
memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
params->parent_fid = tun_entry->outer_tun_flow_id;
- params->fid = tun_entry->first_inner_tun_flow_id;
+ params->fid = flow_info->first_tun_i_fid;
bnxt_ulp_init_mapper_params(&mparms, params,
BNXT_ULP_FDB_TYPE_REGULAR);
enum bnxt_ulp_tun_flow_state flow_state;
int ret;
- flow_state = tun_entry->state;
+ flow_state = tun_entry->tun_flow_info[params->port_id].state;
ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
- if (ret)
+ if (ret == BNXT_TF_RC_ERROR) {
+ PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
return ret;
+ }
/* If flow_state == BNXT_ULP_FLOW_STATE_NORMAL before installing
* F1, that means F2 is not deferred. Hence, no need to install F2.
*/
if (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)
- ulp_install_inner_tun_flow(tun_entry);
+ ulp_install_inner_tun_flow(tun_entry, params);
- return 0;
+ return BNXT_TF_RC_FID;
}
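
The per-port state field decides whether a deferred inner flow (F2) has to be replayed once the outer flow (F1) is programmed. The reading below is inferred from the hunks in this patch; the helper is only an illustration, not driver code:

/* Per-port tunnel flow states as used above (inferred from this patch):
 *
 *   BNXT_ULP_FLOW_STATE_NORMAL       - no inner flow is waiting on this port
 *   BNXT_ULP_FLOW_STATE_TUN_I_CACHED - an F2 arrived before its F1 and its
 *                                      parser params were cached per port
 *   BNXT_ULP_FLOW_STATE_TUN_O_OFFLD  - F1 is programmed for this port
 *
 * After F1 is installed, a cached F2 is replayed only when the port was
 * not in the NORMAL state, which is exactly the check made above.
 */
static inline int
ulp_tun_inner_flow_deferred(enum bnxt_ulp_tun_flow_state state)
{
	return state != BNXT_ULP_FLOW_STATE_NORMAL;
}
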
/* This function will be called if inner tunnel flow request comes before
ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
struct bnxt_tun_cache_entry *tun_entry)
{
+ struct ulp_per_port_flow_info *flow_info;
int ret;
ret = ulp_matcher_pattern_match(params, &params->class_id);
* So, just cache the F2 information and program it in the context
* of F1 flow installation.
*/
- memcpy(&tun_entry->first_inner_tun_params, params,
+ flow_info = &tun_entry->tun_flow_info[params->port_id];
+ memcpy(&flow_info->first_inner_tun_params, params,
sizeof(struct ulp_rte_parser_params));
- tun_entry->first_inner_tun_flow_id = params->fid;
- tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;
+ flow_info->first_tun_i_fid = params->fid;
+ flow_info->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;
/* F1 and its related F2s are correlated based on
* Tunnel Destination IP Address. It could be already set, if
if (rc == BNXT_TF_RC_ERROR)
return rc;
- flow_state = tun_entry->state;
+ flow_state = tun_entry->tun_flow_info[params->port_id].state;
/* Outer tunnel flow validation */
outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
outer_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);
memset(&tun_tbl[tun_idx], 0,
sizeof(struct bnxt_tun_cache_entry));
}
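
The per-port accesses above and the cleanup loop added below imply one cached inner-flow slot per port instead of a single slot per tunnel entry. A sketch of the assumed layout, reconstructed from the field names in this patch; the struct and member names appear in the hunks, but the exact types, ordering, and any additional members are assumptions, with the real definitions in the driver headers:

#include <stdint.h>
#include <stdbool.h>
#include <rte_config.h>		/* RTE_MAX_ETHPORTS */
#include <rte_ether.h>		/* RTE_ETHER_ADDR_LEN */

struct ulp_per_port_flow_info {
	enum bnxt_ulp_tun_flow_state	state;
	uint32_t			first_tun_i_fid;
	struct ulp_rte_parser_params	first_inner_tun_params;
};

struct bnxt_tun_cache_entry {
	bool				valid;
	uint8_t				t_dmac[RTE_ETHER_ADDR_LEN];
	uint32_t			outer_tun_flow_id;
	/* ... other outer-tunnel fields elided ... */
	struct ulp_per_port_flow_info	tun_flow_info[RTE_MAX_ETHPORTS];
};
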
+
+/* When a DPDK application offloads the same tunnel inner flow
+ * on all the uplink ports, a tunnel inner flow entry is cached
+ * even if it is not for the right uplink port. Such tunnel
+ * inner flows will eventually get aged out as there won't be
+ * any traffic on these ports. When a destroy is called for such
+ * a flow, clean up the cached tunnel inner flow entry.
+ */
+void
+ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
+{
+ struct ulp_per_port_flow_info *flow_info;
+ int i, j;
+
+ for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
+ for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+ flow_info = &tun_tbl[i].tun_flow_info[j];
+ if (flow_info->first_tun_i_fid == fid &&
+ flow_info->state == BNXT_ULP_FLOW_STATE_TUN_I_CACHED)
+ memset(flow_info, 0, sizeof(*flow_info));
+ }
+ }
+}