net/bnxt: support flow template for Thor
drivers/net/bnxt/tf_ulp/ulp_tun.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_malloc.h>

#include "ulp_tun.h"
#include "ulp_rte_parser.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_matcher.h"
#include "ulp_mapper.h"
#include "ulp_flow_db.h"

/* This function programs the outer tunnel flow in the hardware. */
static int32_t
ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
                           struct bnxt_tun_cache_entry *tun_entry,
                           uint16_t tun_idx)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        int ret;

        /* Reset the JUMP action bit in the action bitmap as we don't
         * offload this action.
         */
        ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);

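        /* Set the F1 header bit; F1 identifies the tunnel outer flow, so
         * template matching treats this flow as a tunnel outer flow.
         */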
        ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);

#ifdef  RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
#ifdef  RTE_LIBRTE_BNXT_TRUFLOW_DEBUG_PARSER
        /* Dump the rte flow pattern */
        ulp_parser_hdr_info_dump(params);
        /* Dump the rte flow action */
        ulp_parser_act_info_dump(params);
#endif
#endif

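        /* Resolve the class (pattern) and action templates for this flow
         * before handing it to the mapper.
         */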
        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;

        params->parent_flow = true;
        bnxt_ulp_init_mapper_params(&mparms, params,
                                    BNXT_ULP_FDB_TYPE_REGULAR);
        mparms.tun_idx = tun_idx;

        /* Call the ulp mapper to create the flow in the hardware. */
        ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
        if (ret)
                goto err;

        /* Store the tunnel dmac in the tunnel cache table and use it while
         * programming tunnel inner flow.
         */
        memcpy(tun_entry->t_dmac,
               &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
               RTE_ETHER_ADDR_LEN);

        tun_entry->tun_flow_info[params->port_id].state =
                                BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
        tun_entry->outer_tun_flow_id = params->fid;

        /* The tunnel outer flow and its related inner flows are correlated
         * based on the tunnel destination IP address.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;

err:
        memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
        return BNXT_TF_RC_ERROR;
}

/* This function programs the cached inner tunnel flows in the hardware. */
static void
ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
                           struct ulp_rte_parser_params *tun_o_params)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        struct ulp_per_port_flow_info *flow_info;
        struct ulp_rte_parser_params *inner_params;
        int ret;

        /* A tunnel inner flow doesn't carry the tunnel dmac, so use the
         * tunnel dmac that was stored during F1 programming.
         */
        flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
        STAILQ_FOREACH(inner_params, &flow_info->tun_i_prms_list, next) {
                memcpy(&inner_params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
                       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
                inner_params->parent_fid = tun_entry->outer_tun_flow_id;

                bnxt_ulp_init_mapper_params(&mparms, inner_params,
                                            BNXT_ULP_FDB_TYPE_REGULAR);

                ret = ulp_mapper_flow_create(inner_params->ulp_ctx, &mparms);
                if (ret)
                        PMD_DRV_LOG(ERR,
                                    "Failed to create inner tun flow, FID:%u.",
                                    inner_params->fid);
        }
}

/* This function installs either both the outer and the inner tunnel flows or
 * just the outer tunnel flow, based on the flow state.
 */
static int32_t
ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
                             struct bnxt_tun_cache_entry *tun_entry,
                             uint16_t tun_idx)
{
        int ret;

        ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
        if (ret == BNXT_TF_RC_ERROR) {
                PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.");
                return ret;
        }

        /* Install any cached tunnel inner flows that arrived before the
         * tunnel outer flow.
         */
        ulp_install_inner_tun_flow(tun_entry, params);

        return BNXT_TF_RC_FID;
}

/* This function is called if the inner tunnel flow request arrives before
 * the outer tunnel flow request.
 */
static int32_t
ulp_post_process_cache_inner_tun_flow(struct ulp_rte_parser_params *params,
                                      struct bnxt_tun_cache_entry *tun_entry)
{
        struct ulp_rte_parser_params *inner_tun_params;
        struct ulp_per_port_flow_info *flow_info;
        int ret;

#ifdef  RTE_LIBRTE_BNXT_TRUFLOW_DEBUG
#ifdef  RTE_LIBRTE_BNXT_TRUFLOW_DEBUG_PARSER
        /* Dump the rte flow pattern */
        ulp_parser_hdr_info_dump(params);
        /* Dump the rte flow action */
        ulp_parser_act_info_dump(params);
#endif
#endif

        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        /* If the tunnel inner flow arrives first, it can't be installed in
         * the hardware because it doesn't yet have the L2 context
         * information. So just cache the tunnel inner flow information and
         * program it in the context of the F1 flow installation.
         */
        flow_info = &tun_entry->tun_flow_info[params->port_id];
        inner_tun_params = rte_zmalloc("ulp_inner_tun_params",
                                       sizeof(struct ulp_rte_parser_params), 0);
        if (!inner_tun_params)
                return BNXT_TF_RC_ERROR;
        memcpy(inner_tun_params, params, sizeof(struct ulp_rte_parser_params));
        STAILQ_INSERT_TAIL(&flow_info->tun_i_prms_list, inner_tun_params,
                           next);
        flow_info->tun_i_cnt++;

        /* F1 and its related tunnel inner flows are correlated based on the
         * tunnel destination IP address. It may already be set if another
         * inner flow was offloaded first.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;
}

/* This function is called if the inner tunnel flow request arrives after
 * the outer tunnel flow request.
 */
static int32_t
ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
                                struct bnxt_tun_cache_entry *tun_entry)
{
        memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX],
               tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);

        params->parent_fid = tun_entry->outer_tun_flow_id;

        return BNXT_TF_RC_NORMAL;
}

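/* Look up the tunnel cache entry whose destination IP matches the parsed
 * outer destination IP (IPv4 or IPv6). If no entry matches, return the first
 * free slot so the caller can populate it.
 */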
static int32_t
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
                  struct bnxt_tun_cache_entry **tun_entry,
                  uint16_t *tun_idx)
{
        int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
        struct bnxt_tun_cache_entry *tun_tbl;
        bool tun_entry_found = false, free_entry_found = false;

        tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
        if (!tun_tbl)
                return BNXT_TF_RC_ERROR;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                if (!memcmp(&tun_tbl[i].t_dst_ip,
                            &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                            sizeof(rte_be32_t)) ||
                    !memcmp(&tun_tbl[i].t_dst_ip6,
                            &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                            16)) {
                        tun_entry_found = true;
                        break;
                }

                if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
                        first_free_entry = i;
                        free_entry_found = true;
                }
        }

        if (tun_entry_found) {
                *tun_entry = &tun_tbl[i];
                *tun_idx = i;
        } else {
                if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
                        return BNXT_TF_RC_ERROR;
                *tun_entry = &tun_tbl[first_free_entry];
                *tun_idx = first_free_entry;
        }

        return 0;
}

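/* Post process a tunnel decap flow. Based on the parsed headers and the
 * per-port flow state, the flow is rejected, cached (inner flow seen before
 * its outer flow), installed as a tunnel outer flow along with any cached
 * inner flows, or linked to an already offloaded outer flow.
 */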
int32_t
ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
{
        bool inner_tun_sig, cache_inner_tun_flow;
        bool outer_tun_reject, outer_tun_flow, inner_tun_flow;
        enum bnxt_ulp_tun_flow_state flow_state;
        struct bnxt_tun_cache_entry *tun_entry;
        uint32_t l3_tun, l3_tun_decap;
        uint16_t tun_idx;
        int rc;

        /* Computational fields that indicate it's a TUNNEL DECAP flow */
        l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
        l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
                                           BNXT_ULP_CF_IDX_L3_TUN_DECAP);
        if (!l3_tun)
                return BNXT_TF_RC_NORMAL;

        rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
        if (rc == BNXT_TF_RC_ERROR)
                return rc;

        if (params->port_id >= RTE_MAX_ETHPORTS)
                return BNXT_TF_RC_ERROR;
        flow_state = tun_entry->tun_flow_info[params->port_id].state;
        /* Outer tunnel flow validation */
        outer_tun_flow = BNXT_OUTER_TUN_FLOW(l3_tun, params);
        outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
                                                      outer_tun_flow);

        /* Inner tunnel flow validation */
        inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
        cache_inner_tun_flow = BNXT_CACHE_INNER_TUN_FLOW(flow_state,
                                                         inner_tun_sig);
        inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);

        if (outer_tun_reject) {
                tun_entry->outer_tun_rej_cnt++;
                BNXT_TF_DBG(ERR,
                            "Tunnel F1 flow rejected, COUNT: %d\n",
                            tun_entry->outer_tun_rej_cnt);
        }

        if (outer_tun_reject)
                return BNXT_TF_RC_ERROR;
        else if (cache_inner_tun_flow)
                return ulp_post_process_cache_inner_tun_flow(params, tun_entry);
        else if (outer_tun_flow)
                return ulp_post_process_outer_tun_flow(params, tun_entry,
                                                       tun_idx);
        else if (inner_tun_flow)
                return ulp_post_process_inner_tun_flow(params, tun_entry);
        else
                return BNXT_TF_RC_NORMAL;
}

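/* Initialize the per-port tunnel inner flow lists of every tunnel cache
 * entry.
 */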
void
ulp_tun_tbl_init(struct bnxt_tun_cache_entry *tun_tbl)
{
        struct ulp_per_port_flow_info *flow_info;
        int i, j;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                        flow_info = &tun_tbl[i].tun_flow_info[j];
                        STAILQ_INIT(&flow_info->tun_i_prms_list);
                }
        }
}

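/* Free the cached tunnel inner flow params of the given tunnel cache entry,
 * reset the entry, and re-initialize its per-port inner flow lists for reuse.
 */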
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
{
        struct ulp_rte_parser_params *inner_params;
        struct ulp_per_port_flow_info *flow_info;
        int j;

        for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
                /* Drain the list head-first so a freed element is never
                 * dereferenced to advance the iteration.
                 */
                while ((inner_params =
                        STAILQ_FIRST(&flow_info->tun_i_prms_list)) != NULL) {
                        STAILQ_REMOVE_HEAD(&flow_info->tun_i_prms_list, next);
                        rte_free(inner_params);
                }
        }

        memset(&tun_tbl[tun_idx], 0,
               sizeof(struct bnxt_tun_cache_entry));

        for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                flow_info = &tun_tbl[tun_idx].tun_flow_info[j];
                STAILQ_INIT(&flow_info->tun_i_prms_list);
        }
}

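/* Remove and free the cached tunnel inner flow matching the given fid from
 * this port's list. Returns true if the fid was found, false otherwise.
 */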
static bool
ulp_chk_and_rem_tun_i_flow(struct bnxt_tun_cache_entry *tun_entry,
                           struct ulp_per_port_flow_info *flow_info,
                           uint32_t fid)
{
        struct ulp_rte_parser_params *inner_params;
        int j;

        STAILQ_FOREACH(inner_params,
                       &flow_info->tun_i_prms_list,
                       next) {
                if (inner_params->fid == fid) {
                        STAILQ_REMOVE(&flow_info->tun_i_prms_list,
                                      inner_params,
                                      ulp_rte_parser_params,
                                      next);
                        rte_free(inner_params);
                        flow_info->tun_i_cnt--;
                        /* When a DPDK application offloads a duplicate
                         * tunnel inner flow on a port that it is not
                         * destined to, there won't be a tunnel outer flow
                         * associated with these duplicate tunnel inner flows.
                         * So, when the last tunnel inner flow ages out, the
                         * driver has to clear the tunnel entry, otherwise
                         * the tunnel entry cannot be reused.
                         */
                        if (!flow_info->tun_i_cnt &&
                            flow_info->state != BNXT_ULP_FLOW_STATE_TUN_O_OFFLD) {
                                memset(tun_entry, 0,
                                       sizeof(struct bnxt_tun_cache_entry));
                                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                                        flow_info = &tun_entry->tun_flow_info[j];
                                        STAILQ_INIT(&flow_info->tun_i_prms_list);
                                }
                        }
                        return true;
                }
        }

        return false;
}

/* When a DPDK application offloads the same tunnel inner flow
 * on all the uplink ports, a tunnel inner flow entry is cached
 * even if it is not for the right uplink port. Such tunnel
 * inner flows will eventually get aged out as there won't be
 * any traffic on these ports. When such a flow destroy is
 * called, clean up the tunnel inner flow entry.
 */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
{
        struct ulp_per_port_flow_info *flow_info;
        int i, j;

        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                if (!tun_tbl[i].t_dst_ip_valid)
                        continue;
                for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
                        flow_info = &tun_tbl[i].tun_flow_info[j];
                        if (ulp_chk_and_rem_tun_i_flow(&tun_tbl[i],
                                                       flow_info, fid) == true)
                                return;
                }
        }
}