1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2014-2021 Broadcom
11 /* returns negative on error, 1 if new entry is allocated or zero if old */
13 ulp_app_tun_search_entry(struct bnxt_ulp_context *ulp_ctx,
14 struct rte_flow_tunnel *app_tunnel,
15 struct bnxt_flow_app_tun_ent **tun_entry)
17 struct bnxt_flow_app_tun_ent *tun_ent_list;
18 int32_t i, rc = 0, free_entry = -1;
20 tun_ent_list = bnxt_ulp_cntxt_ptr2_app_tun_list_get(ulp_ctx);
22 BNXT_TF_DBG(ERR, "unable to get the app tunnel list\n");
26 for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
27 if (!tun_ent_list[i].ref_cnt) {
31 if (!memcmp(&tun_ent_list[i].app_tunnel,
33 sizeof(struct rte_flow_tunnel))) {
34 *tun_entry = &tun_ent_list[i];
35 tun_ent_list[free_entry].ref_cnt++;
40 if (free_entry >= 0) {
41 *tun_entry = &tun_ent_list[free_entry];
42 memcpy(&tun_ent_list[free_entry].app_tunnel, app_tunnel,
43 sizeof(struct rte_flow_tunnel));
44 tun_ent_list[free_entry].ref_cnt = 1;
47 BNXT_TF_DBG(ERR, "ulp app tunnel list is full\n");
/* Release one reference on an application tunnel cache entry.
 * NOTE(review): this extract is missing the return type, braces, the
 * refcount decrement and the memset() call that the trailing sizeof
 * argument belongs to; only comments are added here.
 */
ulp_app_tun_entry_delete(struct bnxt_flow_app_tun_ent *tun_entry)
	/* Only referenced entries are touched. */
	if (tun_entry->ref_cnt) {
		/* Presumably ref_cnt is decremented just above this check —
		 * TODO confirm against the full source.
		 */
		if (!tun_entry->ref_cnt)
			/* Last reference dropped: the whole entry is wiped. */
			sizeof(struct bnxt_flow_app_tun_ent));
/* Install the Broadcom-specific VXLAN-decap action on a tunnel cache
 * entry; the entry itself is used as the action's conf payload.
 * NOTE(review): return type, braces and any return statement are elided
 * from this extract.
 */
ulp_app_tun_entry_set_decap_action(struct bnxt_flow_app_tun_ent *tun_entry)
	/* The cast (GNU typeof) silences the enum mismatch:
	 * BNXT_RTE_FLOW_ACTION_TYPE_VXLAN_DECAP is a vendor-specific value
	 * outside the generic rte_flow_action_type enum.
	 */
	tun_entry->action.type = (typeof(tun_entry->action.type))
			BNXT_RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
	/* The cache entry doubles as the action configuration pointer. */
	tun_entry->action.conf = tun_entry;
/* Install the Broadcom-specific VXLAN-decap pattern item on a tunnel
 * cache entry; the entry itself serves as the item's spec.
 * NOTE(review): return type, braces and any return statement are elided
 * from this extract.
 */
ulp_app_tun_entry_set_decap_item(struct bnxt_flow_app_tun_ent *tun_entry)
	/* The cast (GNU typeof) silences the enum mismatch:
	 * BNXT_RTE_FLOW_ITEM_TYPE_VXLAN_DECAP is a vendor-specific value
	 * outside the generic rte_flow_item_type enum.
	 */
	tun_entry->item.type = (typeof(tun_entry->item.type))
			BNXT_RTE_FLOW_ITEM_TYPE_VXLAN_DECAP;
	tun_entry->item.spec = tun_entry;
	/* No last/mask: the item is identified purely by its vendor type. */
	tun_entry->item.last = NULL;
	tun_entry->item.mask = NULL;
/* Validate that an opaque pointer refers to one of the entries of this
 * context's application tunnel list; on success the entry is returned.
 * NOTE(review): the second parameter declaration (the "ctx" used below),
 * the NULL-list error return and the fall-through return are elided from
 * this extract; only comments are added here.
 */
struct bnxt_flow_app_tun_ent *
ulp_app_tun_match_entry(struct bnxt_ulp_context *ulp_ctx,
	struct bnxt_flow_app_tun_ent *tun_ent_list;

	tun_ent_list = bnxt_ulp_cntxt_ptr2_app_tun_list_get(ulp_ctx);
		BNXT_TF_DBG(ERR, "unable to get the app tunnel list\n");

	/* Match by address only: ctx must point directly at a cache slot. */
	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		if (&tun_ent_list[i] == ctx)
			return &tun_ent_list[i];
/* Find — or allocate — the tunnel cache entry whose outer destination IP
 * matches the flow being parsed, returning the entry through @tun_entry
 * and its table index through @tun_idx.
 * NOTE(review): several lines are elided from this extract (the third
 * parameter "tun_idx", the "use_ipv4 = 1" assignment, the "!use_ipv4 &&"
 * half of the match condition, braces and the success returns); only
 * comments are added here.
 */
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
	struct bnxt_tun_cache_entry **tun_entry,
	int32_t i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
	struct bnxt_tun_cache_entry *tun_tbl;
	uint32_t dip_idx, dmac_idx, use_ipv4 = 0;

	tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
		BNXT_TF_DBG(ERR, "Error: could not get Tunnel table\n");
		return BNXT_TF_RC_ERROR;

	/* get the outer destination ip and dmac header-field indices */
	dip_idx = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID);
	dmac_idx = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID);
	/* Outer IPv4 header present => compare a 4-byte address below,
	 * otherwise the 16-byte IPv6 address (use_ipv4 is presumably set to
	 * 1 in the elided branch body — TODO confirm).
	 */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		/* Remember the first unused slot in case no entry matches. */
		if (!tun_tbl[i].t_dst_ip_valid) {
			if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
				first_free_entry = i;

		/* match on the destination ip of the tunnel */
		if ((use_ipv4 && !memcmp(&tun_tbl[i].t_dst_ip,
					 params->hdr_field[dip_idx].spec,
					 sizeof(rte_be32_t))) ||
		    !memcmp(tun_tbl[i].t_dst_ip6,
			    params->hdr_field[dip_idx].spec,
			    sizeof(((struct bnxt_tun_cache_entry *)
				    NULL)->t_dst_ip6)))) {
			/* Existing entry matched: hand it back. */
			*tun_entry = &tun_tbl[i];

	/* No match and no free slot: the tunnel table is exhausted. */
	if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID) {
		BNXT_TF_DBG(ERR, "Error: No entry available in tunnel table\n");
		return BNXT_TF_RC_ERROR;

	/* Claim the free slot for this tunnel. */
	*tun_idx = first_free_entry;
	*tun_entry = &tun_tbl[first_free_entry];
	tun_tbl[first_free_entry].t_dst_ip_valid = true;

	/* Update the destination ip and mac */
		memcpy(&tun_tbl[first_free_entry].t_dst_ip,
		       params->hdr_field[dip_idx].spec, sizeof(rte_be32_t));
		memcpy(tun_tbl[first_free_entry].t_dst_ip6,
		       params->hdr_field[dip_idx].spec,
		       sizeof(((struct bnxt_tun_cache_entry *)
	memcpy(tun_tbl[first_free_entry].t_dmac,
	       params->hdr_field[dmac_idx].spec, RTE_ETHER_ADDR_LEN);
/* Tunnel API to delete the tunnel entry.
 * NOTE(review): the return type, the second parameter ("tun_idx") and the
 * braces are elided from this extract.
 */
ulp_tunnel_offload_entry_clear(struct bnxt_tun_cache_entry *tun_tbl,
	/* Zero the whole slot, which also clears t_dst_ip_valid so the slot
	 * becomes reusable by ulp_get_tun_entry().
	 */
	memset(&tun_tbl[tun_idx], 0, sizeof(struct bnxt_tun_cache_entry));
/* Tunnel API to perform tunnel offload process when there is F1/F2 flows.
 * F1 is the outer-tunnel flow (becomes the parent), F2 the inner flow
 * (becomes the child and gets the VXLAN-decap action).
 * NOTE(review): the tun_idx declaration, early returns and several braces
 * are elided from this extract; only comments are added here.
 */
ulp_tunnel_offload_process(struct ulp_rte_parser_params *params)
	struct bnxt_tun_cache_entry *tun_entry;
	int32_t rc = BNXT_TF_RC_SUCCESS;

	/* Perform the tunnel offload only for F1 and F2 flows; anything
	 * else falls through untouched.
	 */
	if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_F1) &&
	    !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_F2))

	/* search for the tunnel entry if not found create one */
	rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
	if (rc == BNXT_TF_RC_ERROR)

	/* Tunnel offload for the outer Tunnel flow */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_F1)) {
		/* Reset the JUMP action bit in the action bitmap as we don't
		 * offload this action.
		 */
		ULP_BITMAP_RESET(params->act_bitmap.bits,
				 BNXT_ULP_ACT_BIT_JUMP);
		/* F1: mark as parent and remember its flow id in the cache
		 * entry so the child (F2) can be linked to it later.
		 */
		params->parent_flow = true;
		params->tun_idx = tun_idx;
		tun_entry->outer_tun_flow_id = params->fid;
	} else if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				    BNXT_ULP_HDR_BIT_F2)) {
		/* F2: the synthetic F2 header bit is consumed here. */
		ULP_BITMAP_RESET(params->hdr_bitmap.bits,
				 BNXT_ULP_HDR_BIT_F2);
		/* add the vxlan decap action for F2 flows */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
		params->child_flow = true;
		params->tun_idx = tun_idx;
		params->parent_flow = false;
	/* Expose the tunnel index as a computed field for the templates. */
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_ID, tun_idx);