+ sizeof(struct bnxt_tun_cache_entry));
+
+ for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
+ flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
+ STAILQ_INIT(&flow_info->tun_i_prms_list);
+ }
+}
+
+ /* Search 'flow_info's tunnel-inner flow list for the flow identified by
+  * 'fid'; if found, unlink it, free it, and decrement the per-port inner
+  * flow count.  When the last inner flow goes away and no outer flow was
+  * ever offloaded on this port, the whole tunnel cache entry is cleared
+  * so it can be reused.
+  *
+  * Returns true if a matching inner flow was found and removed,
+  * false otherwise.
+  */
+ static bool
+ ulp_chk_and_rem_tun_i_flow(struct bnxt_tun_cache_entry *tun_entry,
+ struct ulp_per_port_flow_info *flow_info,
+ uint32_t fid)
+ {
+ struct ulp_rte_parser_params *inner_params;
+ int j;
+
+ STAILQ_FOREACH(inner_params,
+ &flow_info->tun_i_prms_list,
+ next) {
+ if (inner_params->fid == fid) {
+ STAILQ_REMOVE(&flow_info->tun_i_prms_list,
+ inner_params,
+ ulp_rte_parser_params,
+ next);
+ rte_free(inner_params);
+ flow_info->tun_i_cnt--;
+ /* When a dpdk application offloads a duplicate
+ * tunnel inner flow on a port that it is not
+ * destined to, there won't be a tunnel outer flow
+ * associated with these duplicate tunnel inner flows.
+ * So, when the last tunnel inner flow ages out, the
+ * driver has to clear the tunnel entry, otherwise
+ * the tunnel entry cannot be reused.
+ */
+ if (!flow_info->tun_i_cnt &&
+ flow_info->state != BNXT_ULP_FLOW_STATE_TUN_O_OFFLD) {
+ memset(tun_entry, 0,
+ sizeof(struct bnxt_tun_cache_entry));
+ /* The memset above zeroed every per-port STAILQ
+ * head inside tun_entry, leaving them in an
+ * invalid state (stqh_last == NULL).  Re-init
+ * EACH port's list head, not just 'flow_info's
+ * own list repeated RTE_MAX_ETHPORTS times,
+ * which left the other ports' heads corrupted.
+ */
+ for (j = 0; j < RTE_MAX_ETHPORTS; j++)
+ STAILQ_INIT(&tun_entry->tun_flow_info[j].tun_i_prms_list);
+ }
+ return true;
+ }
+ }
+
+ return false;