/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_malloc.h>

#include "ulp_tun.h"
#include "ulp_rte_parser.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_matcher.h"
#include "ulp_mapper.h"
#include "ulp_flow_db.h"

/* This function programs the outer tunnel flow in the hardware. */
static int32_t
ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
                           struct bnxt_tun_cache_entry *tun_entry,
                           uint16_t tun_idx)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        int ret;

        /* Reset the JUMP action bit in the action bitmap as we don't
         * offload this action.
         */
        ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);

        ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);

        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;

        params->parent_flow = true;
        bnxt_ulp_init_mapper_params(&mparms, params,
                                    BNXT_ULP_FDB_TYPE_REGULAR);
        mparms.tun_idx = tun_idx;

        /* Call the ulp mapper to create the flow in the hardware. */
        ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
        if (ret)
                goto err;

        /* Store the tunnel dmac in the tunnel cache table and use it while
         * programming the tunnel inner flow.
         */
        memcpy(tun_entry->t_dmac,
               &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
               RTE_ETHER_ADDR_LEN);

        tun_entry->tun_flow_info[params->port_id].state =
                                BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
        tun_entry->outer_tun_flow_id = params->fid;

        /* The tunnel outer flow and its related inner flows are correlated
         * based on the tunnel destination IP address.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;

err:
        memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
        return BNXT_TF_RC_ERROR;
}

/* This function programs the inner tunnel flow in the hardware. */
static void
ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
                           struct ulp_rte_parser_params *tun_o_params)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        struct ulp_per_port_flow_info *flow_info;
        struct ulp_rte_parser_params *inner_params;
        int ret;

        /* A tunnel inner flow doesn't carry the tunnel dmac; use the
         * tunnel dmac that was stored during F1 programming.
         */
        flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
        STAILQ_FOREACH(inner_params, &flow_info->tun_i_prms_list, next) {
                memcpy(&inner_params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
                       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
                inner_params->parent_fid = tun_entry->outer_tun_flow_id;

                bnxt_ulp_init_mapper_params(&mparms, inner_params,
                                            BNXT_ULP_FDB_TYPE_REGULAR);

                ret = ulp_mapper_flow_create(inner_params->ulp_ctx, &mparms);
                if (ret)
                        PMD_DRV_LOG(ERR,
                                    "Failed to create inner tun flow, FID:%u.",
                                    inner_params->fid);
        }
}

/* This function installs either the outer tunnel flow together with its
 * cached inner tunnel flows, or just the outer tunnel flow, based on the
 * flow state.
 */
static int32_t
ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
                                struct bnxt_tun_cache_entry *tun_entry,
                                uint16_t tun_idx)
{
        int ret;

        ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
        if (ret == BNXT_TF_RC_ERROR) {
                PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
                return ret;
        }

        /* Install any cached tunnel inner flows that came before the
         * tunnel outer flow.
         */
        ulp_install_inner_tun_flow(tun_entry, params);

        return BNXT_TF_RC_FID;
}

/* This function will be called if an inner tunnel flow request comes before
 * the outer tunnel flow request.
 */
static int32_t
ulp_post_process_cache_inner_tun_flow(struct ulp_rte_parser_params *params,
                                      struct bnxt_tun_cache_entry *tun_entry)
{
        struct ulp_rte_parser_params *inner_tun_params;
        struct ulp_per_port_flow_info *flow_info;
        int ret;

        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        /* If the tunnel inner flow comes first, we can't install it in the
         * hardware because it won't have the L2 context information. So,
         * just cache the tunnel inner flow information and program it in
         * the context of the F1 flow installation.
         */
        flow_info = &tun_entry->tun_flow_info[params->port_id];
        inner_tun_params = rte_zmalloc("ulp_inner_tun_params",
                                       sizeof(struct ulp_rte_parser_params),
                                       0);
        if (!inner_tun_params)
                return BNXT_TF_RC_ERROR;
        memcpy(inner_tun_params, params, sizeof(struct ulp_rte_parser_params));
        STAILQ_INSERT_TAIL(&flow_info->tun_i_prms_list, inner_tun_params,
                           next);
        flow_info->tun_i_cnt++;

        /* The F1 flow and its related tunnel inner flows are correlated
         * based on the tunnel destination IP address. It could already be
         * set if the inner flow got offloaded first.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;
}

/* This function will be called if an inner tunnel flow request comes after
 * the outer tunnel flow request.
 */
static int32_t
ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
                                struct bnxt_tun_cache_entry *tun_entry)
{
        memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
               tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);

        params->parent_fid = tun_entry->outer_tun_flow_id;

        return BNXT_TF_RC_NORMAL;
}
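
/* Find the tunnel cache entry whose tunnel destination IP matches the
 * flow's outer destination IP; if no entry matches, return the first
 * free entry so the caller can claim it.
 */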
static int32_t
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
                  struct bnxt_tun_cache_entry **tun_entry,
                  uint16_t *tun_idx)
{
        int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
        struct bnxt_tun_cache_entry *tun_tbl;
        bool tun_entry_found = false, free_entry_found = false;

        tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
        if (!tun_tbl)
                return BNXT_TF_RC_ERROR;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                if (!memcmp(&tun_tbl[i].t_dst_ip,
                            &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                            sizeof(rte_be32_t)) ||
                    !memcmp(&tun_tbl[i].t_dst_ip6,
                            &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                            sizeof(tun_tbl[i].t_dst_ip6))) {
                        tun_entry_found = true;
                        break;
                }

                if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
                        first_free_entry = i;
                        free_entry_found = true;
                }
        }

        if (tun_entry_found) {
                *tun_entry = &tun_tbl[i];
                *tun_idx = i;
        } else {
                if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
                        return BNXT_TF_RC_ERROR;
                *tun_entry = &tun_tbl[first_free_entry];
                *tun_idx = first_free_entry;
        }

        return BNXT_TF_RC_SUCCESS;
}
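
/* Post-process a tunnel decap flow: depending on the computed tunnel
 * fields and the per-port flow state, reject a duplicate outer (F1)
 * flow, cache an inner flow that arrived before its outer flow, install
 * the outer flow along with any cached inner flows, or link an inner
 * flow to its already-offloaded outer flow.
 */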
int32_t
ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
{
        bool inner_tun_sig, cache_inner_tun_flow;
        bool outer_tun_reject, outer_tun_flow, inner_tun_flow;
        enum bnxt_ulp_tun_flow_state flow_state;
        struct bnxt_tun_cache_entry *tun_entry;
        uint32_t l3_tun, l3_tun_decap;
        uint16_t tun_idx;
        int rc;

        /* Computational fields that indicate it's a TUNNEL DECAP flow */
        l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
        l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
                                           BNXT_ULP_CF_IDX_L3_TUN_DECAP);
        if (!l3_tun)
                return BNXT_TF_RC_NORMAL;

        rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
        if (rc == BNXT_TF_RC_ERROR)
                return rc;

        if (params->port_id >= RTE_MAX_ETHPORTS)
                return BNXT_TF_RC_ERROR;
        flow_state = tun_entry->tun_flow_info[params->port_id].state;
        /* Outer tunnel flow validation */
        outer_tun_flow = BNXT_OUTER_TUN_FLOW(l3_tun, params);
        outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
                                                      outer_tun_flow);

        /* Inner tunnel flow validation */
        inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
        cache_inner_tun_flow = BNXT_CACHE_INNER_TUN_FLOW(flow_state,
                                                         inner_tun_sig);
        inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);

        if (outer_tun_reject) {
                tun_entry->outer_tun_rej_cnt++;
                PMD_DRV_LOG(ERR,
                            "Tunnel F1 flow rejected, COUNT: %d\n",
                            tun_entry->outer_tun_rej_cnt);
        }

        if (outer_tun_reject)
                return BNXT_TF_RC_ERROR;
        else if (cache_inner_tun_flow)
                return ulp_post_process_cache_inner_tun_flow(params, tun_entry);
        else if (outer_tun_flow)
                return ulp_post_process_outer_tun_flow(params, tun_entry,
                                                       tun_idx);
        else if (inner_tun_flow)
                return ulp_post_process_inner_tun_flow(params, tun_entry);

        return BNXT_TF_RC_NORMAL;
}
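
/* Initialize the per-port tunnel inner flow lists of every tunnel
 * cache entry.
 */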
void
ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl)
{
        struct ulp_per_port_flow_info *flow_info;
        int i, j;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                        flow_info = &tun_tbl[i].tun_flow_info[j];
                        STAILQ_INIT(&flow_info->tun_i_prms_list);
                }
        }
}
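
/* Free all inner flows cached against the given tunnel cache entry,
 * reset the entry, and re-initialize its per-port lists for reuse.
 */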
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
{
        struct ulp_rte_parser_params *inner_params;
        struct ulp_per_port_flow_info *flow_info;
        int j;

        for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
                /* Drain the list head-first; removing and freeing the
                 * current element inside STAILQ_FOREACH would dereference
                 * freed memory when the iterator advances.
                 */
                while (!STAILQ_EMPTY(&flow_info->tun_i_prms_list)) {
                        inner_params =
                                STAILQ_FIRST(&flow_info->tun_i_prms_list);
                        STAILQ_REMOVE_HEAD(&flow_info->tun_i_prms_list, next);
                        rte_free(inner_params);
                }
        }

        memset(&tun_tbl[tun_idx], 0,
               sizeof(struct bnxt_tun_cache_entry));

        for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
                STAILQ_INIT(&flow_info->tun_i_prms_list);
        }
}
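
/* Search the per-port list for a cached inner flow with the given fid;
 * if found, remove and free it. Returns true when the flow was removed.
 */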
static bool
ulp_chk_and_rem_tun_i_flow(struct bnxt_tun_cache_entry *tun_entry,
                           struct ulp_per_port_flow_info *flow_info,
                           uint32_t fid)
{
        struct ulp_rte_parser_params *inner_params;
        int j;

        STAILQ_FOREACH(inner_params,
                       &flow_info->tun_i_prms_list,
                       next) {
                if (inner_params->fid == fid) {
                        STAILQ_REMOVE(&flow_info->tun_i_prms_list,
                                      inner_params,
                                      ulp_rte_parser_params,
                                      next);
                        rte_free(inner_params);
                        flow_info->tun_i_cnt--;
                        /* When a DPDK application offloads a duplicate
                         * tunnel inner flow on a port that it is not
                         * destined to, there won't be a tunnel outer flow
                         * associated with these duplicate tunnel inner
                         * flows. So, when the last tunnel inner flow ages
                         * out, the driver has to clear the tunnel entry;
                         * otherwise, the tunnel entry cannot be reused.
                         */
                        if (!flow_info->tun_i_cnt &&
                            flow_info->state !=
                            BNXT_ULP_FLOW_STATE_TUN_O_OFFLD) {
                                memset(tun_entry, 0,
                                       sizeof(struct bnxt_tun_cache_entry));
                                /* The memset wiped every per-port list
                                 * head, so re-initialize each one.
                                 */
                                for (j = 0; j < RTE_MAX_ETHPORTS; j++)
                                        STAILQ_INIT(&tun_entry->tun_flow_info[j].tun_i_prms_list);
                        }
                        return true;
                }
        }

        return false;
}

/* When a DPDK application offloads the same tunnel inner flow on all the
 * uplink ports, a tunnel inner flow entry is cached even if it is not for
 * the right uplink port. Such tunnel inner flows will eventually age out,
 * as there won't be any traffic on those ports. When the flow destroy for
 * such a flow is called, clean up its cached tunnel inner flow entry.
 */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
{
        struct ulp_per_port_flow_info *flow_info;
        int i, j;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                if (!tun_tbl[i].t_dst_ip_valid)
                        continue;
                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                        flow_info = &tun_tbl[i].tun_flow_info[j];
                        if (ulp_chk_and_rem_tun_i_flow(&tun_tbl[i],
                                                       flow_info, fid))
                                return;
                }
        }
}