/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_malloc.h>

#include "ulp_tun.h"
#include "ulp_rte_parser.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_matcher.h"
#include "ulp_mapper.h"
#include "ulp_flow_db.h"
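
/* Tunnel flow post-processing.
 *
 * An outer (F1) tunnel flow and its inner flows are correlated by the
 * tunnel destination IP address. Inner flows that arrive before their
 * outer flow cannot be installed yet (they lack the L2 context), so they
 * are cached per port and installed when the outer flow is offloaded.
 */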

/* This function programs the outer tunnel flow in the hardware. */
static int32_t
ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
                           struct bnxt_tun_cache_entry *tun_entry,
                           uint16_t tun_idx)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        int ret;

        /* Reset the JUMP action bit in the action bitmap as we don't
         * offload this action.
         */
        ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
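
        /* F1 denotes the tunnel outer flow; setting the F1 header bit
         * steers the matcher to the outer tunnel flow templates.
         */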
        ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);

#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG_PARSER
        /* Dump the rte flow pattern */
        ulp_parser_hdr_info_dump(params);
        /* Dump the rte flow action */
        ulp_parser_act_info_dump(params);
#endif
#endif

        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;
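
        /* The outer flow is created as a parent flow; cached inner flows
         * reference it later through parent_fid so the flow db can link
         * them as its children.
         */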
        params->parent_flow = true;
        bnxt_ulp_init_mapper_params(&mparms, params,
                                    BNXT_ULP_FDB_TYPE_REGULAR);
        mparms.tun_idx = tun_idx;

        /* Call the ulp mapper to create the flow in the hardware. */
        ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
        if (ret)
                goto err;

        /* Store the tunnel dmac in the tunnel cache table and use it while
         * programming the tunnel inner flow.
         */
        memcpy(tun_entry->t_dmac,
               &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
               RTE_ETHER_ADDR_LEN);

        tun_entry->tun_flow_info[params->port_id].state =
                                BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
        tun_entry->outer_tun_flow_id = params->fid;

        /* The tunnel outer flow and its related inner flows are correlated
         * based on the tunnel destination IP address.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;

err:
        memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
        return BNXT_TF_RC_ERROR;
}

/* This function programs the inner tunnel flow in the hardware. */
static void
ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
                           struct ulp_rte_parser_params *tun_o_params)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        struct ulp_per_port_flow_info *flow_info;
        struct ulp_rte_parser_params *inner_params;
        int ret;

        /* A tunnel inner flow doesn't carry the tunnel dmac; use the dmac
         * that was stored during F1 (outer flow) programming.
         */
        flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
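
        /* Patch each cached inner flow with the stored dmac, link it to
         * the outer flow through parent_fid, and hand it to the mapper.
         */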
        STAILQ_FOREACH(inner_params, &flow_info->tun_i_prms_list, next) {
                memcpy(&inner_params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
                       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
                inner_params->parent_fid = tun_entry->outer_tun_flow_id;

                bnxt_ulp_init_mapper_params(&mparms, inner_params,
                                            BNXT_ULP_FDB_TYPE_REGULAR);

                ret = ulp_mapper_flow_create(inner_params->ulp_ctx, &mparms);
                if (ret)
                        PMD_DRV_LOG(ERR,
                                    "Failed to create inner tun flow, FID:%u.",
                                    inner_params->fid);
        }
}

/* This function installs either both the outer and inner tunnel flows or
 * just the outer tunnel flow, based on the flow state.
 */
static int32_t
ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
                                struct bnxt_tun_cache_entry *tun_entry,
                                uint16_t tun_idx)
{
        int ret;

        ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
        if (ret == BNXT_TF_RC_ERROR) {
                PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
                return ret;
        }

        /* Install any cached tunnel inner flows that came before the
         * tunnel outer flow.
         */
        ulp_install_inner_tun_flow(tun_entry, params);

        return BNXT_TF_RC_FID;
}

/* This function will be called if the inner tunnel flow request comes
 * before the outer tunnel flow request.
 */
static int32_t
ulp_post_process_cache_inner_tun_flow(struct ulp_rte_parser_params *params,
                                      struct bnxt_tun_cache_entry *tun_entry)
{
        struct ulp_rte_parser_params *inner_tun_params;
        struct ulp_per_port_flow_info *flow_info;
        int ret;

#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
#ifdef RTE_LIBRTE_BNXT_TRUFLOW_DEBUG_PARSER
        /* Dump the rte flow pattern */
        ulp_parser_hdr_info_dump(params);
        /* Dump the rte flow action */
        ulp_parser_act_info_dump(params);
#endif
#endif

        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        /* If the tunnel inner flow comes first, it can't be installed in
         * the hardware because the L2 context information isn't known yet.
         * So just cache the tunnel inner flow information and program it
         * in the context of the F1 flow installation.
         */
        flow_info = &tun_entry->tun_flow_info[params->port_id];
        inner_tun_params = rte_zmalloc("ulp_inner_tun_params",
                                       sizeof(struct ulp_rte_parser_params),
                                       0);
        if (!inner_tun_params)
                return BNXT_TF_RC_ERROR;
        memcpy(inner_tun_params, params, sizeof(struct ulp_rte_parser_params));
        STAILQ_INSERT_TAIL(&flow_info->tun_i_prms_list, inner_tun_params,
                           next);
        flow_info->tun_i_cnt++;
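
        /* tun_i_cnt tracks how many inner flows are cached on this port;
         * the destroy path uses it to decide when the tunnel entry can be
         * reclaimed (see ulp_chk_and_rem_tun_i_flow()).
         */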

        /* F1 and its related tunnel inner flows are correlated based on
         * the tunnel destination IP address. It could already be set, if
         * the inner flow got offloaded first.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;
}

/* This function will be called if the inner tunnel flow request comes
 * after the outer tunnel flow request.
 */
static int32_t
ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
                                struct bnxt_tun_cache_entry *tun_entry)
{
        memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
               tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);

        params->parent_fid = tun_entry->outer_tun_flow_id;

        return BNXT_TF_RC_NORMAL;
}
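
/* Find the tunnel cache entry whose tunnel destination IP matches the
 * flow's outer destination IP (IPv4 or IPv6). If no entry matches, hand
 * back the first free entry; fail if the cache is exhausted.
 */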
static int32_t
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
                  struct bnxt_tun_cache_entry **tun_entry,
                  uint16_t *tun_idx)
{
        int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
        struct bnxt_tun_cache_entry *tun_tbl;
        bool tun_entry_found = false, free_entry_found = false;

        tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
        if (!tun_tbl)
                return BNXT_TF_RC_ERROR;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                if (!memcmp(&tun_tbl[i].t_dst_ip,
                            &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                            sizeof(rte_be32_t)) ||
                    !memcmp(&tun_tbl[i].t_dst_ip6,
                            &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                            sizeof(tun_tbl[i].t_dst_ip6))) {
                        tun_entry_found = true;
                        break;
                }

                if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
                        first_free_entry = i;
                        free_entry_found = true;
                }
        }

        if (tun_entry_found) {
                *tun_entry = &tun_tbl[i];
                *tun_idx = i;
        } else {
                if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
                        return BNXT_TF_RC_ERROR;
                *tun_entry = &tun_tbl[first_free_entry];
                *tun_idx = first_free_entry;
        }

        return 0;
}
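
/* Post-process a parsed tunnel flow: classify it as a tunnel outer or
 * tunnel inner flow based on the computed fields and the per-port flow
 * state, then dispatch it to the install/cache handlers above.
 */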
int32_t
ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
{
        bool inner_tun_sig, cache_inner_tun_flow;
        bool outer_tun_reject, outer_tun_flow, inner_tun_flow;
        enum bnxt_ulp_tun_flow_state flow_state;
        struct bnxt_tun_cache_entry *tun_entry;
        uint32_t l3_tun, l3_tun_decap;
        uint16_t tun_idx;
        int rc;

        /* Computational fields that indicate it's a TUNNEL DECAP flow */
        l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
        l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
                                           BNXT_ULP_CF_IDX_L3_TUN_DECAP);
        if (!l3_tun)
                return BNXT_TF_RC_NORMAL;

        rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
        if (rc == BNXT_TF_RC_ERROR)
                return rc;

        if (params->port_id >= RTE_MAX_ETHPORTS)
                return BNXT_TF_RC_ERROR;
        flow_state = tun_entry->tun_flow_info[params->port_id].state;
        /* Outer tunnel flow validation */
        outer_tun_flow = BNXT_OUTER_TUN_FLOW(l3_tun, params);
        outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
                                                      outer_tun_flow);

        /* Inner tunnel flow validation */
        inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
        cache_inner_tun_flow = BNXT_CACHE_INNER_TUN_FLOW(flow_state,
                                                         inner_tun_sig);
        inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);
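
        /* A rejected outer flow is only counted; an inner flow that came
         * before its outer flow is cached; an outer flow is installed
         * together with any cached inner flows; an inner flow that came
         * after its outer flow is installed directly.
         */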
        if (outer_tun_reject) {
                tun_entry->outer_tun_rej_cnt++;
                PMD_DRV_LOG(ERR,
                            "Tunnel F1 flow rejected, COUNT: %d\n",
                            tun_entry->outer_tun_rej_cnt);
        }

        if (outer_tun_reject)
                return BNXT_TF_RC_ERROR;
        else if (cache_inner_tun_flow)
                return ulp_post_process_cache_inner_tun_flow(params, tun_entry);
        else if (outer_tun_flow)
                return ulp_post_process_outer_tun_flow(params, tun_entry,
                                                       tun_idx);
        else if (inner_tun_flow)
                return ulp_post_process_inner_tun_flow(params, tun_entry);
        else
                return BNXT_TF_RC_NORMAL;
}
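
/* Initialize the per-port cached inner flow lists of every tunnel cache
 * entry.
 */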
void
ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl)
{
        struct ulp_per_port_flow_info *flow_info;
        int i, j;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                        flow_info = &tun_tbl[i].tun_flow_info[j];
                        STAILQ_INIT(&flow_info->tun_i_prms_list);
                }
        }
}
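
/* Free all inner flow params cached against the given tunnel cache entry,
 * reset the entry, and re-initialize its per-port lists.
 */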
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
{
        struct ulp_rte_parser_params *inner_params;
        struct ulp_per_port_flow_info *flow_info;
        int j;

        for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
                /* Pop each cached inner flow before freeing it so the
                 * list walk never touches freed memory.
                 */
                while ((inner_params =
                        STAILQ_FIRST(&flow_info->tun_i_prms_list)) != NULL) {
                        STAILQ_REMOVE_HEAD(&flow_info->tun_i_prms_list, next);
                        rte_free(inner_params);
                }
        }

        memset(&tun_tbl[tun_idx], 0,
               sizeof(struct bnxt_tun_cache_entry));

        for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
                STAILQ_INIT(&flow_info->tun_i_prms_list);
        }
}
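
/* Remove the cached inner flow matching the given fid from this port's
 * list. Returns true if the flow was found and removed.
 */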
static bool
ulp_chk_and_rem_tun_i_flow(struct bnxt_tun_cache_entry *tun_entry,
                           struct ulp_per_port_flow_info *flow_info,
                           uint32_t fid)
{
        struct ulp_rte_parser_params *inner_params;
        int j;

        STAILQ_FOREACH(inner_params,
                       &flow_info->tun_i_prms_list,
                       next) {
                if (inner_params->fid == fid) {
                        STAILQ_REMOVE(&flow_info->tun_i_prms_list,
                                      inner_params,
                                      ulp_rte_parser_params,
                                      next);
                        rte_free(inner_params);
                        flow_info->tun_i_cnt--;
                        /* When a DPDK application offloads a duplicate
                         * tunnel inner flow on a port that it is not
                         * destined to, there won't be a tunnel outer flow
                         * associated with these duplicate tunnel inner
                         * flows. So, when the last tunnel inner flow ages
                         * out, the driver has to clear the tunnel entry;
                         * otherwise the tunnel entry cannot be reused.
                         */
                        if (!flow_info->tun_i_cnt &&
                            flow_info->state !=
                            BNXT_ULP_FLOW_STATE_TUN_O_OFFLD) {
                                memset(tun_entry, 0,
                                       sizeof(struct bnxt_tun_cache_entry));
                                /* Re-init the list heads of all ports, not
                                 * just this one; the memset above zeroed
                                 * every per-port list.
                                 */
                                for (j = 0; j < RTE_MAX_ETHPORTS; j++)
                                        STAILQ_INIT(&tun_entry->tun_flow_info[j].tun_i_prms_list);
                        }
                        return true;
                }
        }

        return false;
}

/* When a DPDK application offloads the same tunnel inner flow on all the
 * uplink ports, a tunnel inner flow entry is cached even if it is not for
 * the right uplink port. Such tunnel inner flows will eventually get aged
 * out as there won't be any traffic on these ports. When such a flow
 * destroy is called, clean up the tunnel inner flow entry.
 */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
{
        struct ulp_per_port_flow_info *flow_info;
        int i, j;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                if (!tun_tbl[i].t_dst_ip_valid)
                        continue;
                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                        flow_info = &tun_tbl[i].tun_flow_info[j];
                        if (ulp_chk_and_rem_tun_i_flow(&tun_tbl[i],
                                                       flow_info, fid))
                                return;
                }
        }
}