net/bnxt: support VXLAN decap offload
drivers/net/bnxt/tf_ulp/ulp_tun.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include <rte_malloc.h>

#include "ulp_tun.h"
#include "ulp_rte_parser.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_matcher.h"
#include "ulp_mapper.h"
#include "ulp_flow_db.h"

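/* Tunnel decap offload model used in this file: F1 denotes the outer
 * tunnel flow and F2 denotes an inner (post-decap) flow. An F1 and its
 * related F2s are correlated by the tunnel destination IP address,
 * which is cached in the tunnel cache table along with the tunnel dmac
 * and the F1 flow id.
 */
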
/* This function programs the outer tunnel flow in the hardware. */
static int32_t
ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
                           struct bnxt_tun_cache_entry *tun_entry,
                           uint16_t tun_idx)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        int ret;

        /* Reset the JUMP action bit in the action bitmap as we don't
         * offload this action.
         */
        ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);

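        /* Mark the header bitmap with the F1 (outer tunnel) bit before
         * running the pattern/action matchers below.
         */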
        ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);

        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                goto err;

        params->parent_flow = true;
        bnxt_ulp_init_mapper_params(&mparms, params,
                                    BNXT_ULP_FDB_TYPE_REGULAR);
        mparms.tun_idx = tun_idx;

        /* Call the ulp mapper to create the flow in the hardware. */
        ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
        if (ret)
                goto err;

        /* Store the tunnel dmac in the tunnel cache table and use it while
         * programming tunnel flow F2.
         */
        memcpy(tun_entry->t_dmac,
               &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
               RTE_ETHER_ADDR_LEN);

        tun_entry->valid = true;
        tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
        tun_entry->outer_tun_flow_id = params->fid;

        /* An F1 and its related F2s are correlated based on the
         * tunnel destination IP address.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;

err:
        memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
        return BNXT_TF_RC_ERROR;
}

/* This function programs the inner tunnel flow in the hardware. */
static void
ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry)
{
        struct bnxt_ulp_mapper_create_parms mparms = { 0 };
        struct ulp_rte_parser_params *params;
        int ret;

        /* The F2 flow doesn't carry the tunnel dmac; use the tunnel
         * dmac that was stored during F1 programming.
         */
        params = &tun_entry->first_inner_tun_params;
        memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
               tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
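
        /* Tie this F2 to its parent F1 flow and reuse the fid that was
         * allocated when the F2 was first cached.
         */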
        params->parent_fid = tun_entry->outer_tun_flow_id;
        params->fid = tun_entry->first_inner_tun_flow_id;

        bnxt_ulp_init_mapper_params(&mparms, params,
                                    BNXT_ULP_FDB_TYPE_REGULAR);

        ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
        if (ret)
                PMD_DRV_LOG(ERR, "Failed to create F2 flow.");
}

/* This function installs either both the outer and inner tunnel flows
 * or just the outer tunnel flow, based on the flow state.
 */
static int32_t
ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
                                struct bnxt_tun_cache_entry *tun_entry,
                                uint16_t tun_idx)
{
        enum bnxt_ulp_tun_flow_state flow_state;
        int ret;

        flow_state = tun_entry->state;
        ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
        /* ulp_install_outer_tun_flow() returns BNXT_TF_RC_FID on
         * success, so bail out only on an explicit error.
         */
        if (ret == BNXT_TF_RC_ERROR)
                return ret;

        /* If flow_state == BNXT_ULP_FLOW_STATE_NORMAL before installing
         * F1, that means F2 was not deferred; there is no F2 to install.
         */
        if (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)
                ulp_install_inner_tun_flow(tun_entry);

        return BNXT_TF_RC_FID;
}

/* This function will be called if an inner tunnel flow request arrives
 * before the outer tunnel flow request.
 */
static int32_t
ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
                                      struct bnxt_tun_cache_entry *tun_entry)
{
        int ret;

        ret = ulp_matcher_pattern_match(params, &params->class_id);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        ret = ulp_matcher_action_match(params, &params->act_tmpl);
        if (ret != BNXT_TF_RC_SUCCESS)
                return BNXT_TF_RC_ERROR;

        /* If the tunnel F2 flow arrives first, we cannot install it in
         * the hardware because it lacks the L2 context information.
         * Instead, cache the F2 information and program it when the F1
         * flow is installed.
         */
        memcpy(&tun_entry->first_inner_tun_params, params,
               sizeof(struct ulp_rte_parser_params));

        tun_entry->first_inner_tun_flow_id = params->fid;
        tun_entry->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;

        /* An F1 and its related F2s are correlated based on the
         * tunnel destination IP address. It may already be set if
         * an inner flow was offloaded first.
         */
        if (tun_entry->t_dst_ip_valid)
                goto done;
        if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
                memcpy(&tun_entry->t_dst_ip,
                       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                       sizeof(rte_be32_t));
        else
                memcpy(tun_entry->t_dst_ip6,
                       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                       sizeof(tun_entry->t_dst_ip6));
        tun_entry->t_dst_ip_valid = true;

done:
        return BNXT_TF_RC_FID;
}

/* This function will be called if an inner tunnel flow request arrives
 * after the outer tunnel flow request.
 */
static int32_t
ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
                                struct bnxt_tun_cache_entry *tun_entry)
{
        /* Fill in the tunnel dmac that was cached during F1 programming
         * and tie this F2 to its parent F1 flow.
         */
        memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
               tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);

        params->parent_fid = tun_entry->outer_tun_flow_id;

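        /* Return NORMAL so the caller continues with the regular flow
         * creation path; the F2 now carries its L2 (dmac) context.
         */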
        return BNXT_TF_RC_NORMAL;
}

/* Find the tunnel cache entry that matches this flow's tunnel
 * destination IP, or pick the first free entry if none matches.
 */
static int32_t
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
                  struct bnxt_tun_cache_entry **tun_entry,
                  uint16_t *tun_idx)
{
        int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
        struct bnxt_tun_cache_entry *tun_tbl;
        bool tun_entry_found = false, free_entry_found = false;

        tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
        if (!tun_tbl)
                return BNXT_TF_RC_ERROR;

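        /* Linear scan of the cache: an entry matches when its cached
         * tunnel dst IP equals this flow's outer IPv4 or IPv6 dst IP;
         * otherwise remember the first free slot for reuse.
         */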
        for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
                if (!memcmp(&tun_tbl[i].t_dst_ip,
                            &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
                            sizeof(rte_be32_t)) ||
                    !memcmp(&tun_tbl[i].t_dst_ip6,
                            &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
                            sizeof(tun_tbl[i].t_dst_ip6))) {
                        tun_entry_found = true;
                        break;
                }

                if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
                        first_free_entry = i;
                        free_entry_found = true;
                }
        }

        if (tun_entry_found) {
                *tun_entry = &tun_tbl[i];
                *tun_idx = i;
        } else {
                if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
                        return BNXT_TF_RC_ERROR;
                *tun_entry = &tun_tbl[first_free_entry];
                *tun_idx = first_free_entry;
        }

        return BNXT_TF_RC_SUCCESS;
}

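/* Post-process a parsed flow for tunnel offload handling. Return codes,
 * as used below: BNXT_TF_RC_NORMAL means the flow needs no
 * tunnel-specific handling, BNXT_TF_RC_FID means the flow was consumed
 * here (F1 installed and/or F2 cached or installed), and
 * BNXT_TF_RC_ERROR indicates failure or rejection.
 */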
int32_t
ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
{
        bool outer_tun_sig, inner_tun_sig, first_inner_tun_flow;
        bool outer_tun_reject, inner_tun_reject, outer_tun_flow, inner_tun_flow;
        enum bnxt_ulp_tun_flow_state flow_state;
        struct bnxt_tun_cache_entry *tun_entry;
        uint32_t l3_tun, l3_tun_decap;
        uint16_t tun_idx;
        int rc;

        /* Computed fields that indicate whether this is a tunnel decap
         * flow.
         */
        l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
        l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
                                           BNXT_ULP_CF_IDX_L3_TUN_DECAP);
        if (!l3_tun)
                return BNXT_TF_RC_NORMAL;

        rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
        if (rc == BNXT_TF_RC_ERROR)
                return rc;

        flow_state = tun_entry->state;
        /* Outer tunnel flow validation */
        outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
        outer_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);
        outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
                                                      outer_tun_sig);

        /* Inner tunnel flow validation */
        inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
        first_inner_tun_flow = BNXT_FIRST_INNER_TUN_FLOW(flow_state,
                                                         inner_tun_sig);
        inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);
        inner_tun_reject = BNXT_REJECT_INNER_TUN_FLOW(flow_state,
                                                      inner_tun_sig);

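        /* Reject decisions combine the cached flow state with the F1/F2
         * signatures computed above (see the BNXT_REJECT_*_TUN_FLOW()
         * macros).
         */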
        if (outer_tun_reject) {
                tun_entry->outer_tun_rej_cnt++;
                BNXT_TF_DBG(ERR,
                            "Tunnel F1 flow rejected, COUNT: %d\n",
                            tun_entry->outer_tun_rej_cnt);
        /* An inner tunnel flow is rejected if it arrives between the
         * first inner tunnel flow request and the outer flow request.
         */
        } else if (inner_tun_reject) {
                tun_entry->inner_tun_rej_cnt++;
                BNXT_TF_DBG(ERR,
                            "Tunnel F2 flow rejected, COUNT: %d\n",
                            tun_entry->inner_tun_rej_cnt);
        }

        if (outer_tun_reject || inner_tun_reject)
                return BNXT_TF_RC_ERROR;
        else if (first_inner_tun_flow)
                return ulp_post_process_first_inner_tun_flow(params, tun_entry);
        else if (outer_tun_flow)
                return ulp_post_process_outer_tun_flow(params, tun_entry,
                                                       tun_idx);
        else if (inner_tun_flow)
                return ulp_post_process_inner_tun_flow(params, tun_entry);
        else
                return BNXT_TF_RC_NORMAL;
}

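/* Reset the given tunnel cache entry so that its slot can be reused. */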
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
{
        memset(&tun_tbl[tun_idx], 0,
               sizeof(struct bnxt_tun_cache_entry));
}