drivers/net/bnxt/tf_ulp/ulp_tun.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_malloc.h>

#include "ulp_tun.h"
#include "ulp_rte_parser.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_matcher.h"
#include "ulp_mapper.h"
#include "ulp_flow_db.h"

/* This function programs the outer tunnel flow in the hardware. */
static int32_t
ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
                           struct bnxt_tun_cache_entry *tun_entry,
                           uint16_t tun_idx)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        int ret;

        /* Reset the JUMP action bit in the action bitmap as we don't
         * offload this action.
         */
        ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);

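        /* Mark this as an F1 (tunnel outer) flow before matching the class
         * and action templates.
         */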
        ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);

        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;

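        /* The outer tunnel flow acts as the parent flow; cached inner flows
         * reference it later through parent_fid.
         */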
        params->parent_flow = true;
        bnxt_ulp_init_mapper_params(&mparms, params,
                                    BNXT_ULP_FDB_TYPE_REGULAR);
        mparms.tun_idx = tun_idx;

        /* Call the ulp mapper to create the flow in the hardware. */
        ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
        if (ret)
                goto err;

        /* Store the tunnel dmac in the tunnel cache table and use it while
         * programming the tunnel inner flow.
         */
        memcpy(tun_entry->t_dmac,
               &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
               RTE_ETHER_ADDR_LEN);

        tun_entry->tun_flow_info[params->port_id].state =
                                BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
        tun_entry->outer_tun_flow_id = params->fid;

        /* The tunnel outer flow and its related inner flows are correlated
         * based on the tunnel destination IP address.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;

err:
        memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
        return BNXT_TF_RC_ERROR;
}

/* This function programs the cached tunnel inner flows in the hardware. */
static void
ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
                           struct ulp_rte_parser_params *tun_o_params)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        struct ulp_per_port_flow_info *flow_info;
        struct ulp_rte_parser_params *inner_params;
        int ret;

        /* A tunnel inner flow doesn't carry the tunnel dmac, so use the
         * tunnel dmac that was stored during F1 (outer flow) programming.
         */
        flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
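        /* Program every inner flow cached on this port now that the outer
         * flow is installed and its dmac is known.
         */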
        STAILQ_FOREACH(inner_params, &flow_info->tun_i_prms_list, next) {
                memcpy(&inner_params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
                       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
                inner_params->parent_fid = tun_entry->outer_tun_flow_id;

                bnxt_ulp_init_mapper_params(&mparms, inner_params,
                                            BNXT_ULP_FDB_TYPE_REGULAR);

                ret = ulp_mapper_flow_create(inner_params->ulp_ctx, &mparms);
                if (ret)
                        PMD_DRV_LOG(ERR,
                                    "Failed to create inner tun flow, FID:%u.",
                                    inner_params->fid);
        }
}

/* This function installs either both the outer and inner tunnel flows or
 * just the outer tunnel flow, based on the flow state.
 */
static int32_t
ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
                                struct bnxt_tun_cache_entry *tun_entry,
                                uint16_t tun_idx)
{
        int ret;

        ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
        if (ret == BNXT_TF_RC_ERROR) {
                PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
                return ret;
        }

        /* Install any cached tunnel inner flows that arrived before the
         * tunnel outer flow.
         */
        ulp_install_inner_tun_flow(tun_entry, params);

        return BNXT_TF_RC_FID;
}

/* This function is called if an inner tunnel flow request arrives before
 * the outer tunnel flow request.
 */
static int32_t
ulp_post_process_cache_inner_tun_flow(struct ulp_rte_parser_params *params,
                                      struct bnxt_tun_cache_entry *tun_entry)
{
        struct ulp_rte_parser_params *inner_tun_params;
        struct ulp_per_port_flow_info *flow_info;
        int ret;

        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        /* If the tunnel inner flow arrives first, it can't be installed in
         * the hardware because it lacks the L2 context information. So,
         * just cache the tunnel inner flow information and program it in
         * the context of the F1 flow installation.
         */
        flow_info = &tun_entry->tun_flow_info[params->port_id];
        inner_tun_params = rte_zmalloc("ulp_inner_tun_params",
                                       sizeof(struct ulp_rte_parser_params), 0);
        if (!inner_tun_params)
                return BNXT_TF_RC_ERROR;
        memcpy(inner_tun_params, params, sizeof(struct ulp_rte_parser_params));
        STAILQ_INSERT_TAIL(&flow_info->tun_i_prms_list, inner_tun_params,
                           next);
        flow_info->tun_i_cnt++;

        /* The F1 flow and its related tunnel inner flows are correlated
         * based on the tunnel destination IP address. It could already be
         * set if an inner flow was offloaded first.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;
}

/* This function is called if an inner tunnel flow request arrives after
 * the outer tunnel flow request.
 */
static int32_t
ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
                                struct bnxt_tun_cache_entry *tun_entry)
{
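        /* The outer flow is already installed: reuse the stored tunnel dmac,
         * link this flow to the outer flow's FID and let it be programmed
         * through the normal flow processing path.
         */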
        memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
               tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);

        params->parent_fid = tun_entry->outer_tun_flow_id;

        return BNXT_TF_RC_NORMAL;
}

static int32_t
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
                  struct bnxt_tun_cache_entry **tun_entry,
                  uint16_t *tun_idx)
{
        int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
        struct bnxt_tun_cache_entry *tun_tbl;
        bool tun_entry_found = false, free_entry_found = false;

        tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
        if (!tun_tbl)
                return BNXT_TF_RC_ERROR;

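        /* Look up the tunnel cache by tunnel destination IP (IPv4 or IPv6),
         * remembering the first unused slot so a new entry can be allocated
         * when no match is found.
         */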
        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                if (!memcmp(&tun_tbl[i].t_dst_ip,
                            &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                            sizeof(rte_be32_t)) ||
                    !memcmp(&tun_tbl[i].t_dst_ip6,
                            &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                            16)) {
                        tun_entry_found = true;
                        break;
                }

                if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
                        first_free_entry = i;
                        free_entry_found = true;
                }
        }

        if (tun_entry_found) {
                *tun_entry = &tun_tbl[i];
                *tun_idx = i;
        } else {
                if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
                        return BNXT_TF_RC_ERROR;
                *tun_entry = &tun_tbl[first_free_entry];
                *tun_idx = first_free_entry;
        }

        return 0;
}

int32_t
ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
{
        bool inner_tun_sig, cache_inner_tun_flow;
        bool outer_tun_reject, outer_tun_flow, inner_tun_flow;
        enum bnxt_ulp_tun_flow_state flow_state;
        struct bnxt_tun_cache_entry *tun_entry;
        uint32_t l3_tun, l3_tun_decap;
        uint16_t tun_idx;
        int rc;

        /* Computational fields that indicate it's a TUNNEL DECAP flow */
        l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
        l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
                                           BNXT_ULP_CF_IDX_L3_TUN_DECAP);
        if (!l3_tun)
                return BNXT_TF_RC_NORMAL;

        rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
        if (rc == BNXT_TF_RC_ERROR)
                return rc;

        if (params->port_id >= RTE_MAX_ETHPORTS)
                return BNXT_TF_RC_ERROR;
        flow_state = tun_entry->tun_flow_info[params->port_id].state;
        /* Outer tunnel flow validation */
        outer_tun_flow = BNXT_OUTER_TUN_FLOW(l3_tun, params);
        outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
                                                      outer_tun_flow);

        /* Inner tunnel flow validation */
        inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
        cache_inner_tun_flow = BNXT_CACHE_INNER_TUN_FLOW(flow_state,
                                                         inner_tun_sig);
        inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);

        if (outer_tun_reject) {
                tun_entry->outer_tun_rej_cnt++;
                BNXT_TF_DBG(ERR,
                            "Tunnel F1 flow rejected, COUNT: %d\n",
                            tun_entry->outer_tun_rej_cnt);
        }

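        /* Dispatch on the classification above: reject the outer flow,
         * cache the inner flow until the outer flow arrives, install the
         * outer flow (plus any cached inner flows), or link the inner flow
         * to an already installed outer flow.
         */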
        if (outer_tun_reject)
                return BNXT_TF_RC_ERROR;
        else if (cache_inner_tun_flow)
                return ulp_post_process_cache_inner_tun_flow(params, tun_entry);
        else if (outer_tun_flow)
                return ulp_post_process_outer_tun_flow(params, tun_entry,
                                                       tun_idx);
        else if (inner_tun_flow)
                return ulp_post_process_inner_tun_flow(params, tun_entry);
        else
                return BNXT_TF_RC_NORMAL;
}

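/* Initialize the per-port cached inner flow lists for every tunnel cache
 * entry.
 */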
void
ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl)
{
        struct ulp_per_port_flow_info *flow_info;
        int i, j;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                        flow_info = &tun_tbl[i].tun_flow_info[j];
                        STAILQ_INIT(&flow_info->tun_i_prms_list);
                }
        }
}

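/* Free all cached inner flows for the given tunnel cache entry and reset
 * the entry so it can be reused.
 */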
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
{
        struct ulp_rte_parser_params *inner_params;
        struct ulp_per_port_flow_info *flow_info;
        int j;

        for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
                /* Pop and free each cached inner flow. STAILQ_FOREACH cannot
                 * be used here because the element is freed inside the loop.
                 */
                while ((inner_params =
                        STAILQ_FIRST(&flow_info->tun_i_prms_list)) != NULL) {
                        STAILQ_REMOVE(&flow_info->tun_i_prms_list,
                                      inner_params,
                                      ulp_rte_parser_params, next);
                        rte_free(inner_params);
                }
        }

        memset(&tun_tbl[tun_idx], 0,
               sizeof(struct bnxt_tun_cache_entry));

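        /* The memset above also cleared the list heads; re-initialize the
         * per-port inner flow lists so the entry can be reused.
         */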
        for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
                STAILQ_INIT(&flow_info->tun_i_prms_list);
        }
}

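/* Remove the cached tunnel inner flow with the given FID from this port's
 * list, if present, and free it.
 */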
static bool
ulp_chk_and_rem_tun_i_flow(struct bnxt_tun_cache_entry *tun_entry,
                           struct ulp_per_port_flow_info *flow_info,
                           uint32_t fid)
{
        struct ulp_rte_parser_params *inner_params;
        int j;

        STAILQ_FOREACH(inner_params,
                       &flow_info->tun_i_prms_list,
                       next) {
                if (inner_params->fid == fid) {
                        STAILQ_REMOVE(&flow_info->tun_i_prms_list,
                                      inner_params,
                                      ulp_rte_parser_params,
                                      next);
                        rte_free(inner_params);
                        flow_info->tun_i_cnt--;
                        /* When a DPDK application offloads a duplicate
                         * tunnel inner flow on a port that the tunnel is not
                         * destined to, there won't be a tunnel outer flow
                         * associated with these duplicate tunnel inner flows.
                         * So, when the last tunnel inner flow ages out, the
                         * driver has to clear the tunnel entry, otherwise
                         * the tunnel entry cannot be reused.
                         */
                        if (!flow_info->tun_i_cnt &&
                            flow_info->state != BNXT_ULP_FLOW_STATE_TUN_O_OFFLD) {
                                memset(tun_entry, 0,
                                       sizeof(struct bnxt_tun_cache_entry));
                                for (j = 0; j < RTE_MAX_ETHPORTS; j++)
                                        STAILQ_INIT(&tun_entry->tun_flow_info[j].tun_i_prms_list);
                        }
                        return true;
                }
        }

        return false;
}

/* When a DPDK application offloads the same tunnel inner flow on all the
 * uplink ports, a tunnel inner flow entry is cached even if it is not for
 * the right uplink port. Such tunnel inner flows will eventually age out
 * as there won't be any traffic on these ports. When such a flow destroy
 * is called, clean up the cached tunnel inner flow entry.
 */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
{
        struct ulp_per_port_flow_info *flow_info;
        int i, j;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                if (!tun_tbl[i].t_dst_ip_valid)
                        continue;
                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                        flow_info = &tun_tbl[i].tun_flow_info[j];
                        if (ulp_chk_and_rem_tun_i_flow(&tun_tbl[i],
                                                       flow_info, fid) == true)
                                return;
                }
        }
}