/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */
#include <stdbool.h>
#include <string.h>

#include <rte_malloc.h>

#include "ulp_rte_parser.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_matcher.h"
#include "ulp_mapper.h"
#include "ulp_flow_db.h"
16 /* This function programs the outer tunnel flow in the hardware. */
18 ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
19 struct bnxt_tun_cache_entry *tun_entry,
22 struct bnxt_ulp_mapper_create_parms mparms = { 0 };
25 /* Reset the JUMP action bit in the action bitmap as we don't
26 * offload this action.
28 ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
30 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
32 ret = ulp_matcher_pattern_match(params, ¶ms->class_id);
33 if (ret != BNXT_TF_RC_SUCCESS)
36 ret = ulp_matcher_action_match(params, ¶ms->act_tmpl);
37 if (ret != BNXT_TF_RC_SUCCESS)
40 params->parent_flow = true;
41 bnxt_ulp_init_mapper_params(&mparms, params,
42 BNXT_ULP_FDB_TYPE_REGULAR);
43 mparms.tun_idx = tun_idx;
45 /* Call the ulp mapper to create the flow in the hardware. */
46 ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
50 /* Store the tunnel dmac in the tunnel cache table and use it while
51 * programming tunnel flow F2.
53 memcpy(tun_entry->t_dmac,
54 ¶ms->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
57 tun_entry->valid = true;
58 tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
59 tun_entry->outer_tun_flow_id = params->fid;
61 /* F1 and it's related F2s are correlated based on
62 * Tunnel Destination IP Address.
64 if (tun_entry->t_dst_ip_valid)
66 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
67 memcpy(&tun_entry->t_dst_ip,
68 ¶ms->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
71 memcpy(tun_entry->t_dst_ip6,
72 ¶ms->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
73 sizeof(tun_entry->t_dst_ip6));
74 tun_entry->t_dst_ip_valid = true;
77 return BNXT_TF_RC_FID;
80 memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
81 return BNXT_TF_RC_ERROR;
84 /* This function programs the inner tunnel flow in the hardware. */
86 ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry)
88 struct bnxt_ulp_mapper_create_parms mparms = { 0 };
89 struct ulp_rte_parser_params *params;
92 /* F2 doesn't have tunnel dmac, use the tunnel dmac that was
93 * stored during F1 programming.
95 params = &tun_entry->first_inner_tun_params;
96 memcpy(¶ms->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
97 tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
98 params->parent_fid = tun_entry->outer_tun_flow_id;
99 params->fid = tun_entry->first_inner_tun_flow_id;
101 bnxt_ulp_init_mapper_params(&mparms, params,
102 BNXT_ULP_FDB_TYPE_REGULAR);
104 ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
106 PMD_DRV_LOG(ERR, "Failed to create F2 flow.");
109 /* This function either install outer tunnel flow & inner tunnel flow
110 * or just the outer tunnel flow based on the flow state.
113 ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
114 struct bnxt_tun_cache_entry *tun_entry,
117 enum bnxt_ulp_tun_flow_state flow_state;
120 flow_state = tun_entry->state;
121 ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
125 /* If flow_state == BNXT_ULP_FLOW_STATE_NORMAL before installing
126 * F1, that means F2 is not deferred. Hence, no need to install F2.
128 if (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)
129 ulp_install_inner_tun_flow(tun_entry);
134 /* This function will be called if inner tunnel flow request comes before
135 * outer tunnel flow request.
138 ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
139 struct bnxt_tun_cache_entry *tun_entry)
143 ret = ulp_matcher_pattern_match(params, ¶ms->class_id);
144 if (ret != BNXT_TF_RC_SUCCESS)
145 return BNXT_TF_RC_ERROR;
147 ret = ulp_matcher_action_match(params, ¶ms->act_tmpl);
148 if (ret != BNXT_TF_RC_SUCCESS)
149 return BNXT_TF_RC_ERROR;
151 /* If Tunnel F2 flow comes first then we can't install it in the
152 * hardware, because, F2 flow will not have L2 context information.
153 * So, just cache the F2 information and program it in the context
154 * of F1 flow installation.
156 memcpy(&tun_entry->first_inner_tun_params, params,
157 sizeof(struct ulp_rte_parser_params));
159 tun_entry->first_inner_tun_flow_id = params->fid;
160 tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;
162 /* F1 and it's related F2s are correlated based on
163 * Tunnel Destination IP Address. It could be already set, if
164 * the inner flow got offloaded first.
166 if (tun_entry->t_dst_ip_valid)
168 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
169 memcpy(&tun_entry->t_dst_ip,
170 ¶ms->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
173 memcpy(tun_entry->t_dst_ip6,
174 ¶ms->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
175 sizeof(tun_entry->t_dst_ip6));
176 tun_entry->t_dst_ip_valid = true;
179 return BNXT_TF_RC_FID;
182 /* This function will be called if inner tunnel flow request comes after
183 * the outer tunnel flow request.
186 ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
187 struct bnxt_tun_cache_entry *tun_entry)
189 memcpy(¶ms->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
190 tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
192 params->parent_fid = tun_entry->outer_tun_flow_id;
194 return BNXT_TF_RC_NORMAL;
198 ulp_get_tun_entry(struct ulp_rte_parser_params *params,
199 struct bnxt_tun_cache_entry **tun_entry,
202 int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
203 struct bnxt_tun_cache_entry *tun_tbl;
204 bool tun_entry_found = false, free_entry_found = false;
206 tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
208 return BNXT_TF_RC_ERROR;
210 for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
211 if (!memcmp(&tun_tbl[i].t_dst_ip,
212 ¶ms->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
213 sizeof(rte_be32_t)) ||
214 !memcmp(&tun_tbl[i].t_dst_ip6,
215 ¶ms->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
217 tun_entry_found = true;
221 if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
222 first_free_entry = i;
223 free_entry_found = true;
227 if (tun_entry_found) {
228 *tun_entry = &tun_tbl[i];
231 if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
232 return BNXT_TF_RC_ERROR;
233 *tun_entry = &tun_tbl[first_free_entry];
234 *tun_idx = first_free_entry;
241 ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
243 bool outer_tun_sig, inner_tun_sig, first_inner_tun_flow;
244 bool outer_tun_reject, inner_tun_reject, outer_tun_flow, inner_tun_flow;
245 enum bnxt_ulp_tun_flow_state flow_state;
246 struct bnxt_tun_cache_entry *tun_entry;
247 uint32_t l3_tun, l3_tun_decap;
251 /* Computational fields that indicate it's a TUNNEL DECAP flow */
252 l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
253 l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
254 BNXT_ULP_CF_IDX_L3_TUN_DECAP);
256 return BNXT_TF_RC_NORMAL;
258 rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
259 if (rc == BNXT_TF_RC_ERROR)
262 flow_state = tun_entry->state;
263 /* Outer tunnel flow validation */
264 outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
265 outer_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);
266 outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
269 /* Inner tunnel flow validation */
270 inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
271 first_inner_tun_flow = BNXT_FIRST_INNER_TUN_FLOW(flow_state,
273 inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);
274 inner_tun_reject = BNXT_REJECT_INNER_TUN_FLOW(flow_state,
277 if (outer_tun_reject) {
278 tun_entry->outer_tun_rej_cnt++;
280 "Tunnel F1 flow rejected, COUNT: %d\n",
281 tun_entry->outer_tun_rej_cnt);
282 /* Inner tunnel flow is rejected if it comes between first inner
283 * tunnel flow and outer flow requests.
285 } else if (inner_tun_reject) {
286 tun_entry->inner_tun_rej_cnt++;
288 "Tunnel F2 flow rejected, COUNT: %d\n",
289 tun_entry->inner_tun_rej_cnt);
292 if (outer_tun_reject || inner_tun_reject)
293 return BNXT_TF_RC_ERROR;
294 else if (first_inner_tun_flow)
295 return ulp_post_process_first_inner_tun_flow(params, tun_entry);
296 else if (outer_tun_flow)
297 return ulp_post_process_outer_tun_flow(params, tun_entry,
299 else if (inner_tun_flow)
300 return ulp_post_process_inner_tun_flow(params, tun_entry);
302 return BNXT_TF_RC_NORMAL;
306 ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
308 memset(&tun_tbl[tun_idx], 0,
309 sizeof(struct bnxt_tun_cache_entry));