/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */
6 #include <rte_malloc.h>
9 #include "ulp_rte_parser.h"
10 #include "ulp_template_db_enum.h"
11 #include "ulp_template_struct.h"
12 #include "ulp_matcher.h"
13 #include "ulp_mapper.h"
14 #include "ulp_flow_db.h"
16 /* This function programs the outer tunnel flow in the hardware. */
18 ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
19 struct bnxt_tun_cache_entry *tun_entry,
22 struct bnxt_ulp_mapper_create_parms mparms = { 0 };
25 /* Reset the JUMP action bit in the action bitmap as we don't
26 * offload this action.
28 ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
30 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
32 ret = ulp_matcher_pattern_match(params, ¶ms->class_id);
33 if (ret != BNXT_TF_RC_SUCCESS)
36 ret = ulp_matcher_action_match(params, ¶ms->act_tmpl);
37 if (ret != BNXT_TF_RC_SUCCESS)
40 params->parent_flow = true;
41 bnxt_ulp_init_mapper_params(&mparms, params,
42 BNXT_ULP_FDB_TYPE_REGULAR);
43 mparms.tun_idx = tun_idx;
45 /* Call the ulp mapper to create the flow in the hardware. */
46 ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
50 /* Store the tunnel dmac in the tunnel cache table and use it while
51 * programming tunnel flow F2.
53 memcpy(tun_entry->t_dmac,
54 ¶ms->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
57 tun_entry->valid = true;
58 tun_entry->tun_flow_info[params->port_id].state =
59 BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
60 tun_entry->outer_tun_flow_id = params->fid;
62 /* F1 and it's related F2s are correlated based on
63 * Tunnel Destination IP Address.
65 if (tun_entry->t_dst_ip_valid)
67 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
68 memcpy(&tun_entry->t_dst_ip,
69 ¶ms->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
72 memcpy(tun_entry->t_dst_ip6,
73 ¶ms->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
74 sizeof(tun_entry->t_dst_ip6));
75 tun_entry->t_dst_ip_valid = true;
78 return BNXT_TF_RC_FID;
81 memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
82 return BNXT_TF_RC_ERROR;
85 /* This function programs the inner tunnel flow in the hardware. */
87 ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
88 struct ulp_rte_parser_params *tun_o_params)
90 struct bnxt_ulp_mapper_create_parms mparms = { 0 };
91 struct ulp_per_port_flow_info *flow_info;
92 struct ulp_rte_parser_params *params;
95 /* F2 doesn't have tunnel dmac, use the tunnel dmac that was
96 * stored during F1 programming.
98 flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
99 params = &flow_info->first_inner_tun_params;
100 memcpy(¶ms->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
101 tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
102 params->parent_fid = tun_entry->outer_tun_flow_id;
103 params->fid = flow_info->first_tun_i_fid;
105 bnxt_ulp_init_mapper_params(&mparms, params,
106 BNXT_ULP_FDB_TYPE_REGULAR);
108 ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
110 PMD_DRV_LOG(ERR, "Failed to create F2 flow.");
113 /* This function either install outer tunnel flow & inner tunnel flow
114 * or just the outer tunnel flow based on the flow state.
117 ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
118 struct bnxt_tun_cache_entry *tun_entry,
121 enum bnxt_ulp_tun_flow_state flow_state;
124 flow_state = tun_entry->tun_flow_info[params->port_id].state;
125 ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
126 if (ret == BNXT_TF_RC_ERROR) {
127 PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
131 /* If flow_state == BNXT_ULP_FLOW_STATE_NORMAL before installing
132 * F1, that means F2 is not deferred. Hence, no need to install F2.
134 if (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)
135 ulp_install_inner_tun_flow(tun_entry, params);
137 return BNXT_TF_RC_FID;
140 /* This function will be called if inner tunnel flow request comes before
141 * outer tunnel flow request.
144 ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
145 struct bnxt_tun_cache_entry *tun_entry)
147 struct ulp_per_port_flow_info *flow_info;
150 ret = ulp_matcher_pattern_match(params, ¶ms->class_id);
151 if (ret != BNXT_TF_RC_SUCCESS)
152 return BNXT_TF_RC_ERROR;
154 ret = ulp_matcher_action_match(params, ¶ms->act_tmpl);
155 if (ret != BNXT_TF_RC_SUCCESS)
156 return BNXT_TF_RC_ERROR;
158 /* If Tunnel F2 flow comes first then we can't install it in the
159 * hardware, because, F2 flow will not have L2 context information.
160 * So, just cache the F2 information and program it in the context
161 * of F1 flow installation.
163 flow_info = &tun_entry->tun_flow_info[params->port_id];
164 memcpy(&flow_info->first_inner_tun_params, params,
165 sizeof(struct ulp_rte_parser_params));
167 flow_info->first_tun_i_fid = params->fid;
168 flow_info->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;
170 /* F1 and it's related F2s are correlated based on
171 * Tunnel Destination IP Address. It could be already set, if
172 * the inner flow got offloaded first.
174 if (tun_entry->t_dst_ip_valid)
176 if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
177 memcpy(&tun_entry->t_dst_ip,
178 ¶ms->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
181 memcpy(tun_entry->t_dst_ip6,
182 ¶ms->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
183 sizeof(tun_entry->t_dst_ip6));
184 tun_entry->t_dst_ip_valid = true;
187 return BNXT_TF_RC_FID;
190 /* This function will be called if inner tunnel flow request comes after
191 * the outer tunnel flow request.
194 ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
195 struct bnxt_tun_cache_entry *tun_entry)
197 memcpy(¶ms->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
198 tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
200 params->parent_fid = tun_entry->outer_tun_flow_id;
202 return BNXT_TF_RC_NORMAL;
206 ulp_get_tun_entry(struct ulp_rte_parser_params *params,
207 struct bnxt_tun_cache_entry **tun_entry,
210 int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
211 struct bnxt_tun_cache_entry *tun_tbl;
212 bool tun_entry_found = false, free_entry_found = false;
214 tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
216 return BNXT_TF_RC_ERROR;
218 for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
219 if (!memcmp(&tun_tbl[i].t_dst_ip,
220 ¶ms->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
221 sizeof(rte_be32_t)) ||
222 !memcmp(&tun_tbl[i].t_dst_ip6,
223 ¶ms->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
225 tun_entry_found = true;
229 if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
230 first_free_entry = i;
231 free_entry_found = true;
235 if (tun_entry_found) {
236 *tun_entry = &tun_tbl[i];
239 if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
240 return BNXT_TF_RC_ERROR;
241 *tun_entry = &tun_tbl[first_free_entry];
242 *tun_idx = first_free_entry;
249 ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
251 bool outer_tun_sig, inner_tun_sig, first_inner_tun_flow;
252 bool outer_tun_reject, inner_tun_reject, outer_tun_flow, inner_tun_flow;
253 enum bnxt_ulp_tun_flow_state flow_state;
254 struct bnxt_tun_cache_entry *tun_entry;
255 uint32_t l3_tun, l3_tun_decap;
259 /* Computational fields that indicate it's a TUNNEL DECAP flow */
260 l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
261 l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
262 BNXT_ULP_CF_IDX_L3_TUN_DECAP);
264 return BNXT_TF_RC_NORMAL;
266 rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
267 if (rc == BNXT_TF_RC_ERROR)
270 flow_state = tun_entry->tun_flow_info[params->port_id].state;
271 /* Outer tunnel flow validation */
272 outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
273 outer_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);
274 outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
277 /* Inner tunnel flow validation */
278 inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
279 first_inner_tun_flow = BNXT_FIRST_INNER_TUN_FLOW(flow_state,
281 inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);
282 inner_tun_reject = BNXT_REJECT_INNER_TUN_FLOW(flow_state,
285 if (outer_tun_reject) {
286 tun_entry->outer_tun_rej_cnt++;
288 "Tunnel F1 flow rejected, COUNT: %d\n",
289 tun_entry->outer_tun_rej_cnt);
290 /* Inner tunnel flow is rejected if it comes between first inner
291 * tunnel flow and outer flow requests.
293 } else if (inner_tun_reject) {
294 tun_entry->inner_tun_rej_cnt++;
296 "Tunnel F2 flow rejected, COUNT: %d\n",
297 tun_entry->inner_tun_rej_cnt);
300 if (outer_tun_reject || inner_tun_reject)
301 return BNXT_TF_RC_ERROR;
302 else if (first_inner_tun_flow)
303 return ulp_post_process_first_inner_tun_flow(params, tun_entry);
304 else if (outer_tun_flow)
305 return ulp_post_process_outer_tun_flow(params, tun_entry,
307 else if (inner_tun_flow)
308 return ulp_post_process_inner_tun_flow(params, tun_entry);
310 return BNXT_TF_RC_NORMAL;
314 ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
316 memset(&tun_tbl[tun_idx], 0,
317 sizeof(struct bnxt_tun_cache_entry));
320 /* When a dpdk application offloads the same tunnel inner flow
321 * on all the uplink ports, a tunnel inner flow entry is cached
322 * even if it is not for the right uplink port. Such tunnel
323 * inner flows will eventually get aged out as there won't be
324 * any traffic on these ports. When such a flow destroy is
325 * called, cleanup the tunnel inner flow entry.
328 ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
330 struct ulp_per_port_flow_info *flow_info;
333 for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES ; i++) {
334 for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
335 flow_info = &tun_tbl[i].tun_flow_info[j];
336 if (flow_info->first_tun_i_fid == fid &&
337 flow_info->state == BNXT_ULP_FLOW_STATE_TUN_I_CACHED)
338 memset(flow_info, 0, sizeof(*flow_info));