/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_malloc.h>

#include "ulp_tun.h"
#include "ulp_rte_parser.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_matcher.h"
#include "ulp_mapper.h"
#include "ulp_flow_db.h"

/* This function programs the outer tunnel flow in the hardware. */
static int32_t
ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
			   struct bnxt_tun_cache_entry *tun_entry,
			   uint16_t tun_idx)
{
	struct bnxt_ulp_mapper_create_parms mparms = { 0 };
	int ret;

	/* Reset the JUMP action bit in the action bitmap as we don't
	 * offload this action.
	 */
	ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);

	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);

	ret = ulp_matcher_pattern_match(params, &params->class_id);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto err;

	ret = ulp_matcher_action_match(params, &params->act_tmpl);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto err;

	params->parent_flow = true;
	bnxt_ulp_init_mapper_params(&mparms, params,
				    BNXT_ULP_FDB_TYPE_REGULAR);
	mparms.tun_idx = tun_idx;

	/* Call the ulp mapper to create the flow in the hardware. */
	ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
	if (ret)
		goto err;

	/* Store the tunnel dmac in the tunnel cache table and use it while
	 * programming the tunnel inner flow.
	 */
	memcpy(tun_entry->t_dmac,
	       &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
	       RTE_ETHER_ADDR_LEN);

	tun_entry->tun_flow_info[params->port_id].state =
				BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
	tun_entry->outer_tun_flow_id = params->fid;

	/* The tunnel outer flow and its related inner flows are correlated
	 * based on the tunnel destination IP address.
	 */
	if (tun_entry->t_dst_ip_valid)
		goto done;
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
		memcpy(&tun_entry->t_dst_ip,
		       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
		       sizeof(rte_be32_t));
	else
		memcpy(tun_entry->t_dst_ip6,
		       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
		       sizeof(tun_entry->t_dst_ip6));
	tun_entry->t_dst_ip_valid = true;

done:
	return BNXT_TF_RC_FID;

err:
	memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
	return BNXT_TF_RC_ERROR;
}

/* This function programs the inner tunnel flow in the hardware. */
static void
ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
			   struct ulp_rte_parser_params *tun_o_params)
{
	struct bnxt_ulp_mapper_create_parms mparms = { 0 };
	struct ulp_per_port_flow_info *flow_info;
	struct ulp_rte_parser_params *inner_params;
	int ret;

	/* The tunnel inner flow doesn't have the tunnel dmac, so use the
	 * tunnel dmac that was stored during F1 programming.
	 */
	flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
	STAILQ_FOREACH(inner_params, &flow_info->tun_i_prms_list, next) {
		memcpy(&inner_params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
		       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
		inner_params->parent_fid = tun_entry->outer_tun_flow_id;

		bnxt_ulp_init_mapper_params(&mparms, inner_params,
					    BNXT_ULP_FDB_TYPE_REGULAR);

		ret = ulp_mapper_flow_create(inner_params->ulp_ctx, &mparms);
		if (ret)
			PMD_DRV_LOG(ERR,
				    "Failed to create inner tun flow, FID:%u.",
				    inner_params->fid);
	}
}

/* This function installs either both the outer & inner tunnel flows or
 * just the outer tunnel flow, based on the flow state.
 */
static int32_t
ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
				struct bnxt_tun_cache_entry *tun_entry,
				uint16_t tun_idx)
{
	int ret;

	ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
	if (ret == BNXT_TF_RC_ERROR) {
		PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
		return ret;
	}

	/* Install any cached tunnel inner flows that came before the tunnel
	 * outer flow.
	 */
	ulp_install_inner_tun_flow(tun_entry, params);

	return BNXT_TF_RC_FID;
}

/* This function will be called if the inner tunnel flow request comes
 * before the outer tunnel flow request.
 */
static int32_t
ulp_post_process_cache_inner_tun_flow(struct ulp_rte_parser_params *params,
				      struct bnxt_tun_cache_entry *tun_entry)
{
	struct ulp_rte_parser_params *inner_tun_params;
	struct ulp_per_port_flow_info *flow_info;
	int ret;

	ret = ulp_matcher_pattern_match(params, &params->class_id);
	if (ret != BNXT_TF_RC_SUCCESS)
		return BNXT_TF_RC_ERROR;

	ret = ulp_matcher_action_match(params, &params->act_tmpl);
	if (ret != BNXT_TF_RC_SUCCESS)
		return BNXT_TF_RC_ERROR;

	/* If the tunnel inner flow comes first, it can't be installed in the
	 * hardware, because a tunnel inner flow doesn't carry the L2 context
	 * information. So just cache the tunnel inner flow information and
	 * program it in the context of the F1 flow installation.
	 */
	flow_info = &tun_entry->tun_flow_info[params->port_id];
	inner_tun_params = rte_zmalloc("ulp_inner_tun_params",
				       sizeof(struct ulp_rte_parser_params),
				       0);
	if (!inner_tun_params)
		return BNXT_TF_RC_ERROR;
	memcpy(inner_tun_params, params, sizeof(struct ulp_rte_parser_params));
	STAILQ_INSERT_TAIL(&flow_info->tun_i_prms_list, inner_tun_params,
			   next);
	flow_info->tun_i_cnt++;

	/* F1 and its related tunnel inner flows are correlated based on
	 * the tunnel destination IP address. It could already be set if
	 * the inner flow got offloaded first.
	 */
	if (tun_entry->t_dst_ip_valid)
		goto done;
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
		memcpy(&tun_entry->t_dst_ip,
		       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
		       sizeof(rte_be32_t));
	else
		memcpy(tun_entry->t_dst_ip6,
		       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
		       sizeof(tun_entry->t_dst_ip6));
	tun_entry->t_dst_ip_valid = true;

done:
	return BNXT_TF_RC_FID;
}

/* This function will be called if the inner tunnel flow request comes
 * after the outer tunnel flow request.
 */
static int32_t
ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
				struct bnxt_tun_cache_entry *tun_entry)
{
	memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
	       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);

	params->parent_fid = tun_entry->outer_tun_flow_id;

	return BNXT_TF_RC_NORMAL;
}
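
/* Find the tunnel cache entry whose destination IP matches the flow's
 * outer destination IP; if there is no match, return the first free entry.
 */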
static int32_t
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
		  struct bnxt_tun_cache_entry **tun_entry,
		  uint16_t *tun_idx)
{
	int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
	struct bnxt_tun_cache_entry *tun_tbl;
	bool tun_entry_found = false, free_entry_found = false;

	tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
	if (!tun_tbl)
		return BNXT_TF_RC_ERROR;

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		if (!memcmp(&tun_tbl[i].t_dst_ip,
			    &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
			    sizeof(rte_be32_t)) ||
		    !memcmp(&tun_tbl[i].t_dst_ip6,
			    &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
			    sizeof(tun_tbl[i].t_dst_ip6))) {
			tun_entry_found = true;
			break;
		}

		if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
			first_free_entry = i;
			free_entry_found = true;
		}
	}

	if (tun_entry_found) {
		*tun_entry = &tun_tbl[i];
		*tun_idx = i;
	} else {
		if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
			return BNXT_TF_RC_ERROR;
		*tun_entry = &tun_tbl[first_free_entry];
		*tun_idx = first_free_entry;
	}

	return BNXT_TF_RC_SUCCESS;
}
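
/* Post-process a tunnel decap flow: classify it as a tunnel outer or
 * tunnel inner flow, then install, cache, or reject it based on the
 * current per-port tunnel flow state.
 */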
int32_t
ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
{
	bool inner_tun_sig, cache_inner_tun_flow;
	bool outer_tun_reject, outer_tun_flow, inner_tun_flow;
	enum bnxt_ulp_tun_flow_state flow_state;
	struct bnxt_tun_cache_entry *tun_entry;
	uint32_t l3_tun, l3_tun_decap;
	uint16_t tun_idx;
	int rc;

	/* Computational fields that indicate it's a TUNNEL DECAP flow */
	l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
	l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_L3_TUN_DECAP);
	if (!l3_tun)
		return BNXT_TF_RC_NORMAL;

	rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
	if (rc == BNXT_TF_RC_ERROR)
		return rc;

	if (params->port_id >= RTE_MAX_ETHPORTS)
		return BNXT_TF_RC_ERROR;
	flow_state = tun_entry->tun_flow_info[params->port_id].state;
	/* Outer tunnel flow validation */
	outer_tun_flow = BNXT_OUTER_TUN_FLOW(l3_tun, params);
	outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
						      outer_tun_flow);

	/* Inner tunnel flow validation */
	inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
	cache_inner_tun_flow = BNXT_CACHE_INNER_TUN_FLOW(flow_state,
							 inner_tun_sig);
	inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);

	if (outer_tun_reject) {
		tun_entry->outer_tun_rej_cnt++;
		PMD_DRV_LOG(ERR,
			    "Tunnel F1 flow rejected, COUNT: %d\n",
			    tun_entry->outer_tun_rej_cnt);
	}

	if (outer_tun_reject)
		return BNXT_TF_RC_ERROR;
	else if (cache_inner_tun_flow)
		return ulp_post_process_cache_inner_tun_flow(params,
							     tun_entry);
	else if (outer_tun_flow)
		return ulp_post_process_outer_tun_flow(params, tun_entry,
						       tun_idx);
	else if (inner_tun_flow)
		return ulp_post_process_inner_tun_flow(params, tun_entry);
	else
		return BNXT_TF_RC_NORMAL;
}
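
/* Initialize the per-port tunnel inner flow lists of every tunnel
 * cache entry.
 */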
void
ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl)
{
	struct ulp_per_port_flow_info *flow_info;
	int i, j;

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
			flow_info = &tun_tbl[i].tun_flow_info[j];
			STAILQ_INIT(&flow_info->tun_i_prms_list);
		}
	}
}
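
/* Free all cached inner flow params held by a tunnel entry, reset the
 * entry, and re-initialize its per-port lists so it can be reused.
 */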
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
{
	struct ulp_rte_parser_params *inner_params;
	struct ulp_per_port_flow_info *flow_info;
	int j;

	for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
		flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
		/* Pop and free the entries one at a time; freeing inside
		 * STAILQ_FOREACH would read the freed node to advance the
		 * iterator.
		 */
		while ((inner_params =
			STAILQ_FIRST(&flow_info->tun_i_prms_list)) != NULL) {
			STAILQ_REMOVE_HEAD(&flow_info->tun_i_prms_list, next);
			rte_free(inner_params);
		}
	}

	memset(&tun_tbl[tun_idx], 0,
	       sizeof(struct bnxt_tun_cache_entry));

	for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
		flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
		STAILQ_INIT(&flow_info->tun_i_prms_list);
	}
}
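
/* Remove the cached inner flow with the given FID from a port's list,
 * if present. Returns true when the flow was found and removed.
 */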
static bool
ulp_chk_and_rem_tun_i_flow(struct bnxt_tun_cache_entry *tun_entry,
			   struct ulp_per_port_flow_info *flow_info,
			   uint32_t fid)
{
	struct ulp_rte_parser_params *inner_params;
	int j;

	STAILQ_FOREACH(inner_params,
		       &flow_info->tun_i_prms_list,
		       next) {
		if (inner_params->fid == fid) {
			STAILQ_REMOVE(&flow_info->tun_i_prms_list,
				      inner_params,
				      ulp_rte_parser_params,
				      next);
			rte_free(inner_params);
			flow_info->tun_i_cnt--;
			/* When a DPDK application offloads a duplicate
			 * tunnel inner flow on a port that it is not
			 * destined to, there won't be a tunnel outer flow
			 * associated with these duplicate tunnel inner flows.
			 * So, when the last tunnel inner flow ages out, the
			 * driver has to clear the tunnel entry; otherwise
			 * the tunnel entry cannot be reused.
			 */
			if (!flow_info->tun_i_cnt &&
			    flow_info->state !=
			    BNXT_ULP_FLOW_STATE_TUN_O_OFFLD) {
				memset(tun_entry, 0,
				       sizeof(struct bnxt_tun_cache_entry));
				/* The memset wiped every port's list head,
				 * so re-init them all, not just this port's.
				 */
				for (j = 0; j < RTE_MAX_ETHPORTS; j++)
					STAILQ_INIT(&tun_entry->tun_flow_info[j].tun_i_prms_list);
			}
			return true;
		}
	}

	return false;
}

/* When a DPDK application offloads the same tunnel inner flow
 * on all the uplink ports, a tunnel inner flow entry is cached
 * even if it is not for the right uplink port. Such tunnel
 * inner flows will eventually get aged out as there won't be
 * any traffic on these ports. When such a flow destroy is
 * called, clean up the tunnel inner flow entry.
 */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
{
	struct ulp_per_port_flow_info *flow_info;
	int i, j;

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		if (!tun_tbl[i].t_dst_ip_valid)
			continue;
		for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
			flow_info = &tun_tbl[i].tun_flow_info[j];
			if (ulp_chk_and_rem_tun_i_flow(&tun_tbl[i],
						       flow_info, fid))
				return;
		}
	}
}