drivers/net/bnxt/tf_ulp/ulp_tun.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_malloc.h>

#include "ulp_tun.h"
#include "ulp_rte_parser.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_matcher.h"
#include "ulp_mapper.h"
#include "ulp_flow_db.h"

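/* Tunnel flow post-processing.
 *
 * A tunnel decap offload is split into a tunnel outer (F1) flow, which
 * carries the L2/tunnel context, and tunnel inner flows, which match on
 * the inner packet. Inner flows that arrive before the outer flow cannot
 * be programmed yet, so they are cached per port in the tunnel cache
 * table and installed once the outer flow has been offloaded. Outer and
 * inner flows are correlated by the tunnel destination IP address.
 */
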
/* This function programs the outer tunnel flow in the hardware. */
static int32_t
ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
                           struct bnxt_tun_cache_entry *tun_entry,
                           uint16_t tun_idx)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        int ret;

        /* Reset the JUMP action bit in the action bitmap as we don't
         * offload this action.
         */
        ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);

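        /* Mark this flow as the tunnel outer (F1) flow before matching it
         * against the class and action templates.
         */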
        ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);

        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;

        params->parent_flow = true;
        bnxt_ulp_init_mapper_params(&mparms, params,
                                    BNXT_ULP_FDB_TYPE_REGULAR);
        mparms.tun_idx = tun_idx;

        /* Call the ulp mapper to create the flow in the hardware. */
        ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
        if (ret)
                goto err;

        /* Store the tunnel dmac in the tunnel cache table and use it while
         * programming the tunnel inner flows.
         */
        memcpy(tun_entry->t_dmac,
               &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
               RTE_ETHER_ADDR_LEN);

        tun_entry->tun_flow_info[params->port_id].state =
                                BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
        tun_entry->outer_tun_flow_id = params->fid;

        /* The tunnel outer flow and its related inner flows are correlated
         * based on the tunnel destination IP address.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;

err:
        memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
        return BNXT_TF_RC_ERROR;
}

/* This function programs the cached tunnel inner flows in the hardware. */
static void
ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
                           struct ulp_rte_parser_params *tun_o_params)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        struct ulp_per_port_flow_info *flow_info;
        struct ulp_rte_parser_params *inner_params;
        int ret;

        /* A tunnel inner flow doesn't carry the tunnel dmac; use the dmac
         * that was stored during F1 (tunnel outer flow) programming.
         */
        flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
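        /* Program every inner flow cached for this port now that the outer
         * flow, and with it the L2 context, is in place.
         */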
        STAILQ_FOREACH(inner_params, &flow_info->tun_i_prms_list, next) {
                memcpy(&inner_params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
                       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
                inner_params->parent_fid = tun_entry->outer_tun_flow_id;

                bnxt_ulp_init_mapper_params(&mparms, inner_params,
                                            BNXT_ULP_FDB_TYPE_REGULAR);

                ret = ulp_mapper_flow_create(inner_params->ulp_ctx, &mparms);
                if (ret)
                        PMD_DRV_LOG(ERR,
                                    "Failed to create inner tun flow, FID:%u.",
                                    inner_params->fid);
        }
}

/* This function installs either the outer tunnel flow and its cached inner
 * flows, or just the outer tunnel flow, based on the flow state.
 */
static int32_t
ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
                                struct bnxt_tun_cache_entry *tun_entry,
                                uint16_t tun_idx)
{
        int ret;

        ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
        if (ret == BNXT_TF_RC_ERROR) {
                PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
                return ret;
        }

        /* Install any cached tunnel inner flows that came before the tunnel
         * outer flow.
         */
        ulp_install_inner_tun_flow(tun_entry, params);

        return BNXT_TF_RC_FID;
}

/* This function is called if an inner tunnel flow request arrives before
 * the outer tunnel flow request.
 */
static int32_t
ulp_post_process_cache_inner_tun_flow(struct ulp_rte_parser_params *params,
                                      struct bnxt_tun_cache_entry *tun_entry)
{
        struct ulp_rte_parser_params *inner_tun_params;
        struct ulp_per_port_flow_info *flow_info;
        int ret;

        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        /* If the tunnel inner flow comes first, we can't install it in the
         * hardware because it won't have the L2 context information. So
         * just cache the tunnel inner flow information and program it in
         * the context of the F1 flow installation.
         */
        flow_info = &tun_entry->tun_flow_info[params->port_id];
        inner_tun_params = rte_zmalloc("ulp_inner_tun_params",
                                       sizeof(struct ulp_rte_parser_params), 0);
        if (!inner_tun_params)
                return BNXT_TF_RC_ERROR;
        memcpy(inner_tun_params, params, sizeof(struct ulp_rte_parser_params));
        STAILQ_INSERT_TAIL(&flow_info->tun_i_prms_list, inner_tun_params,
                           next);
        flow_info->tun_i_cnt++;

        /* F1 and its related tunnel inner flows are correlated based on the
         * tunnel destination IP address. It could already be set if an
         * inner flow was cached first.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;
}

/* This function is called if an inner tunnel flow request arrives after
 * the outer tunnel flow request.
 */
static int32_t
ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
                                struct bnxt_tun_cache_entry *tun_entry)
{
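        /* The outer (F1) flow is already offloaded: patch in the tunnel
         * dmac saved during F1 programming, link this flow to the outer
         * flow id and continue with normal flow processing.
         */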
        memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
               tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);

        params->parent_fid = tun_entry->outer_tun_flow_id;

        return BNXT_TF_RC_NORMAL;
}

static int32_t
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
                  struct bnxt_tun_cache_entry **tun_entry,
                  uint16_t *tun_idx)
{
        int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
        struct bnxt_tun_cache_entry *tun_tbl;
        bool tun_entry_found = false, free_entry_found = false;

        tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
        if (!tun_tbl)
                return BNXT_TF_RC_ERROR;

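        /* Look up the tunnel cache entry whose destination IP matches this
         * flow; if none matches, fall back to the first free entry.
         */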
        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                if (!memcmp(&tun_tbl[i].t_dst_ip,
                            &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                            sizeof(rte_be32_t)) ||
                    !memcmp(&tun_tbl[i].t_dst_ip6,
                            &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                            16)) {
                        tun_entry_found = true;
                        break;
                }

                if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
                        first_free_entry = i;
                        free_entry_found = true;
                }
        }

        if (tun_entry_found) {
                *tun_entry = &tun_tbl[i];
                *tun_idx = i;
        } else {
                if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
                        return BNXT_TF_RC_ERROR;
                *tun_entry = &tun_tbl[first_free_entry];
                *tun_idx = first_free_entry;
        }

        return 0;
}

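/* Tunnel flow post-processing entry point. Based on the computed header
 * fields and the per-port tunnel flow state, the flow is rejected, cached,
 * installed as the tunnel outer flow, linked to an already offloaded outer
 * flow, or returned to the caller for normal (non-tunnel) processing.
 */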
int32_t
ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
{
        bool inner_tun_sig, cache_inner_tun_flow;
        bool outer_tun_reject, outer_tun_flow, inner_tun_flow;
        enum bnxt_ulp_tun_flow_state flow_state;
        struct bnxt_tun_cache_entry *tun_entry;
        uint32_t l3_tun, l3_tun_decap;
        uint16_t tun_idx;
        int rc;

        /* Computational fields that indicate it's a TUNNEL DECAP flow */
        l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
        l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
                                           BNXT_ULP_CF_IDX_L3_TUN_DECAP);
        if (!l3_tun)
                return BNXT_TF_RC_NORMAL;

        rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
        if (rc == BNXT_TF_RC_ERROR)
                return rc;

        if (params->port_id >= RTE_MAX_ETHPORTS)
                return BNXT_TF_RC_ERROR;
        flow_state = tun_entry->tun_flow_info[params->port_id].state;
        /* Outer tunnel flow validation */
        outer_tun_flow = BNXT_OUTER_TUN_FLOW(l3_tun, params);
        outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
                                                      outer_tun_flow);

        /* Inner tunnel flow validation */
        inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
        cache_inner_tun_flow = BNXT_CACHE_INNER_TUN_FLOW(flow_state,
                                                         inner_tun_sig);
        inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);

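        /* Dispatch on the flow type and the per-port flow state: reject an
         * outer flow that is invalid for the current state, cache an inner
         * flow that arrived before its outer flow, install the outer flow
         * (plus any cached inner flows), attach an inner flow that arrived
         * after the outer flow, or fall through to normal flow processing.
         */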
        if (outer_tun_reject) {
                tun_entry->outer_tun_rej_cnt++;
                BNXT_TF_DBG(ERR,
                            "Tunnel F1 flow rejected, COUNT: %d\n",
                            tun_entry->outer_tun_rej_cnt);
        }

        if (outer_tun_reject)
                return BNXT_TF_RC_ERROR;
        else if (cache_inner_tun_flow)
                return ulp_post_process_cache_inner_tun_flow(params, tun_entry);
        else if (outer_tun_flow)
                return ulp_post_process_outer_tun_flow(params, tun_entry,
                                                       tun_idx);
        else if (inner_tun_flow)
                return ulp_post_process_inner_tun_flow(params, tun_entry);
        else
                return BNXT_TF_RC_NORMAL;
}

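/* Initialize the cached inner flow list of every (tunnel entry, port)
 * pair in the tunnel cache table.
 */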
void
ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl)
{
        struct ulp_per_port_flow_info *flow_info;
        int i, j;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                        flow_info = &tun_tbl[i].tun_flow_info[j];
                        STAILQ_INIT(&flow_info->tun_i_prms_list);
                }
        }
}

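/* Free the inner flow params cached for the given tunnel cache entry,
 * reset the entry and re-initialize its per-port inner flow lists so the
 * entry can be reused.
 */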
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
{
        struct ulp_rte_parser_params *inner_params;
        struct ulp_per_port_flow_info *flow_info;
        int j;

        for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
                /* Pop entries from the head of the list so that a freed
                 * element is never dereferenced to find the next one.
                 */
                while ((inner_params =
                        STAILQ_FIRST(&flow_info->tun_i_prms_list)) != NULL) {
                        STAILQ_REMOVE_HEAD(&flow_info->tun_i_prms_list, next);
                        rte_free(inner_params);
                }
        }

        memset(&tun_tbl[tun_idx], 0,
               sizeof(struct bnxt_tun_cache_entry));

        for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
                STAILQ_INIT(&flow_info->tun_i_prms_list);
        }
}

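/* Search the given port's cached inner flow list for the flow id. If it
 * is found, unlink and free it; the tunnel entry itself is cleared when
 * the last cached inner flow goes away and no outer flow was offloaded.
 * Returns true when the flow id was found.
 */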
static bool
ulp_chk_and_rem_tun_i_flow(struct bnxt_tun_cache_entry *tun_entry,
                           struct ulp_per_port_flow_info *flow_info,
                           uint32_t fid)
{
        struct ulp_rte_parser_params *inner_params;
        int j;

        STAILQ_FOREACH(inner_params,
                       &flow_info->tun_i_prms_list,
                       next) {
                if (inner_params->fid == fid) {
                        STAILQ_REMOVE(&flow_info->tun_i_prms_list,
                                      inner_params,
                                      ulp_rte_parser_params,
                                      next);
                        rte_free(inner_params);
                        flow_info->tun_i_cnt--;
                        /* When a DPDK application offloads a duplicate
                         * tunnel inner flow on a port that the tunnel is
                         * not destined to, there won't be a tunnel outer
                         * flow associated with these duplicate inner flows.
                         * So when the last such inner flow ages out, the
                         * driver has to clear the tunnel entry, otherwise
                         * the tunnel entry cannot be reused.
                         */
                        if (!flow_info->tun_i_cnt &&
                            flow_info->state != BNXT_ULP_FLOW_STATE_TUN_O_OFFLD) {
                                memset(tun_entry, 0,
                                       sizeof(struct bnxt_tun_cache_entry));
                                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                                        flow_info =
                                                &tun_entry->tun_flow_info[j];
                                        STAILQ_INIT(&flow_info->tun_i_prms_list);
                                }
                        }
                        return true;
                }
        }

        return false;
}

/* When a DPDK application offloads the same tunnel inner flow on all the
 * uplink ports, a tunnel inner flow entry is cached even on ports that the
 * tunnel is not destined to. Such tunnel inner flows eventually age out,
 * as there won't be any traffic on those ports. When destroy is called for
 * such a flow, clean up its cached tunnel inner flow entry.
 */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
{
        struct ulp_per_port_flow_info *flow_info;
        int i, j;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                if (!tun_tbl[i].t_dst_ip_valid)
                        continue;
                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                        flow_info = &tun_tbl[i].tun_flow_info[j];
                        if (ulp_chk_and_rem_tun_i_flow(&tun_tbl[i],
                                                       flow_info, fid))
                                return;
                }
        }
}