net/bnxt: support flow API item parsing
[dpdk.git] / drivers / net / bnxt / tf_ulp / ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
12 #include "tfp.h"
13
/* Inline Func to read integer that is stored in big endian format */
static inline void ulp_util_field_int_read(uint8_t *buffer,
					   uint32_t *val)
{
	/* Assemble the four big endian bytes into host byte order. */
	*val = ((uint32_t)buffer[0] << 24) |
	       ((uint32_t)buffer[1] << 16) |
	       ((uint32_t)buffer[2] << 8) |
	       (uint32_t)buffer[3];
}
23
/* Inline Func to write integer that is stored in big endian format */
static inline void ulp_util_field_int_write(uint8_t *buffer,
					    uint32_t val)
{
	/* Store the four bytes most significant first (big endian). */
	buffer[0] = (uint8_t)(val >> 24);
	buffer[1] = (uint8_t)(val >> 16);
	buffer[2] = (uint8_t)(val >> 8);
	buffer[3] = (uint8_t)val;
}
32
33 /*
34  * Function to handle the parsing of RTE Flows and placing
35  * the RTE flow items into the ulp structures.
36  */
37 int32_t
38 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
39                               struct ulp_rte_hdr_bitmap *hdr_bitmap,
40                               struct ulp_rte_hdr_field *hdr_field)
41 {
42         const struct rte_flow_item *item = pattern;
43         uint32_t field_idx = BNXT_ULP_HDR_FIELD_LAST;
44         uint32_t vlan_idx = 0;
45         struct bnxt_ulp_rte_hdr_info *hdr_info;
46
47         /* Parse all the items in the pattern */
48         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
49                 /* get the header information from the flow_hdr_info table */
50                 hdr_info = &ulp_hdr_info[item->type];
51                 if (hdr_info->hdr_type ==
52                     BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
53                         BNXT_TF_DBG(ERR,
54                                     "Truflow parser does not support type %d\n",
55                                     item->type);
56                         return BNXT_TF_RC_PARSE_ERR;
57                 } else if (hdr_info->hdr_type ==
58                            BNXT_ULP_HDR_TYPE_SUPPORTED) {
59                         /* call the registered callback handler */
60                         if (hdr_info->proto_hdr_func) {
61                                 if (hdr_info->proto_hdr_func(item,
62                                                              hdr_bitmap,
63                                                              hdr_field,
64                                                              &field_idx,
65                                                              &vlan_idx) !=
66                                     BNXT_TF_RC_SUCCESS) {
67                                         return BNXT_TF_RC_ERROR;
68                                 }
69                         }
70                 }
71                 item++;
72         }
73         return BNXT_TF_RC_SUCCESS;
74 }
75
76 /* Function to handle the parsing of RTE Flow item PF Header. */
77 static int32_t
78 ulp_rte_parser_svif_set(struct ulp_rte_hdr_bitmap *hdr_bitmap,
79                         struct ulp_rte_hdr_field *hdr_field,
80                         enum rte_flow_item_type proto,
81                         uint32_t svif,
82                         uint32_t mask)
83 {
84         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF)) {
85                 BNXT_TF_DBG(ERR,
86                             "SVIF already set,"
87                             " multiple sources not supported\n");
88                 return BNXT_TF_RC_ERROR;
89         }
90
91         /* TBD: Check for any mapping errors for svif */
92         /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF. */
93         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF);
94
95         if (proto != RTE_FLOW_ITEM_TYPE_PF) {
96                 memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec,
97                        &svif, sizeof(svif));
98                 memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask,
99                        &mask, sizeof(mask));
100                 hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif);
101         }
102
103         return BNXT_TF_RC_SUCCESS;
104 }
105
106 /* Function to handle the parsing of RTE Flow item PF Header. */
107 int32_t
108 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
109                        struct ulp_rte_hdr_bitmap *hdr_bitmap,
110                        struct ulp_rte_hdr_field *hdr_field,
111                        uint32_t *field_idx __rte_unused,
112                        uint32_t *vlan_idx __rte_unused)
113 {
114         return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
115                                        item->type, 0, 0);
116 }
117
118 /* Function to handle the parsing of RTE Flow item VF Header. */
119 int32_t
120 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
121                        struct ulp_rte_hdr_bitmap *hdr_bitmap,
122                        struct ulp_rte_hdr_field  *hdr_field,
123                        uint32_t *field_idx __rte_unused,
124                        uint32_t *vlan_idx __rte_unused)
125 {
126         const struct rte_flow_item_vf *vf_spec, *vf_mask;
127         uint32_t svif = 0, mask = 0;
128
129         vf_spec = item->spec;
130         vf_mask = item->mask;
131
132         /*
133          * Copy the rte_flow_item for eth into hdr_field using ethernet
134          * header fields.
135          */
136         if (vf_spec)
137                 svif = vf_spec->id;
138         if (vf_mask)
139                 mask = vf_mask->id;
140
141         return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
142                                        item->type, svif, mask);
143 }
144
145 /* Function to handle the parsing of RTE Flow item port id  Header. */
146 int32_t
147 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
148                             struct ulp_rte_hdr_bitmap *hdr_bitmap,
149                             struct ulp_rte_hdr_field *hdr_field,
150                             uint32_t *field_idx __rte_unused,
151                             uint32_t *vlan_idx __rte_unused)
152 {
153         const struct rte_flow_item_port_id *port_spec, *port_mask;
154         uint32_t svif = 0, mask = 0;
155
156         port_spec = item->spec;
157         port_mask = item->mask;
158
159         /*
160          * Copy the rte_flow_item for Port into hdr_field using port id
161          * header fields.
162          */
163         if (port_spec)
164                 svif = port_spec->id;
165         if (port_mask)
166                 mask = port_mask->id;
167
168         return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
169                                        item->type, svif, mask);
170 }
171
172 /* Function to handle the parsing of RTE Flow item phy port Header. */
173 int32_t
174 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
175                              struct ulp_rte_hdr_bitmap *hdr_bitmap,
176                              struct ulp_rte_hdr_field *hdr_field,
177                              uint32_t *field_idx __rte_unused,
178                              uint32_t *vlan_idx __rte_unused)
179 {
180         const struct rte_flow_item_phy_port *port_spec, *port_mask;
181         uint32_t svif = 0, mask = 0;
182
183         port_spec = item->spec;
184         port_mask = item->mask;
185
186         /* Copy the rte_flow_item for phy port into hdr_field */
187         if (port_spec)
188                 svif = port_spec->index;
189         if (port_mask)
190                 mask = port_mask->index;
191
192         return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
193                                        item->type, svif, mask);
194 }
195
196 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
197 int32_t
198 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
199                         struct ulp_rte_hdr_bitmap *hdr_bitmap,
200                         struct ulp_rte_hdr_field *hdr_field,
201                         uint32_t *field_idx,
202                         uint32_t *vlan_idx)
203 {
204         const struct rte_flow_item_eth *eth_spec, *eth_mask;
205         uint32_t idx = *field_idx;
206         uint32_t mdx = *field_idx;
207         uint64_t set_flag = 0;
208
209         eth_spec = item->spec;
210         eth_mask = item->mask;
211
212         /*
213          * Copy the rte_flow_item for eth into hdr_field using ethernet
214          * header fields
215          */
216         if (eth_spec) {
217                 hdr_field[idx].size = sizeof(eth_spec->dst.addr_bytes);
218                 memcpy(hdr_field[idx++].spec, eth_spec->dst.addr_bytes,
219                        sizeof(eth_spec->dst.addr_bytes));
220                 hdr_field[idx].size = sizeof(eth_spec->src.addr_bytes);
221                 memcpy(hdr_field[idx++].spec, eth_spec->src.addr_bytes,
222                        sizeof(eth_spec->src.addr_bytes));
223                 hdr_field[idx].size = sizeof(eth_spec->type);
224                 memcpy(hdr_field[idx++].spec, &eth_spec->type,
225                        sizeof(eth_spec->type));
226         } else {
227                 idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
228         }
229
230         if (eth_mask) {
231                 memcpy(hdr_field[mdx++].mask, eth_mask->dst.addr_bytes,
232                        sizeof(eth_mask->dst.addr_bytes));
233                 memcpy(hdr_field[mdx++].mask, eth_mask->src.addr_bytes,
234                        sizeof(eth_mask->src.addr_bytes));
235                 memcpy(hdr_field[mdx++].mask, &eth_mask->type,
236                        sizeof(eth_mask->type));
237         }
238         /* Add number of vlan header elements */
239         *field_idx = idx + BNXT_ULP_PROTO_HDR_VLAN_NUM;
240         *vlan_idx = idx;
241
242         /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH */
243         set_flag = ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);
244         if (set_flag)
245                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);
246         else
247                 ULP_BITMAP_RESET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);
248
249         /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
250         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);
251
252         return BNXT_TF_RC_SUCCESS;
253 }
254
255 /* Function to handle the parsing of RTE Flow item Vlan Header. */
256 int32_t
257 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
258                          struct ulp_rte_hdr_bitmap *hdr_bitmap,
259                          struct ulp_rte_hdr_field *hdr_field,
260                          uint32_t *field_idx __rte_unused,
261                          uint32_t *vlan_idx)
262 {
263         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
264         uint32_t idx = *vlan_idx;
265         uint32_t mdx = *vlan_idx;
266         uint16_t vlan_tag, priority;
267         uint32_t outer_vtag_num = 0, inner_vtag_num = 0;
268         uint8_t *outer_tag_buffer;
269         uint8_t *inner_tag_buffer;
270
271         vlan_spec = item->spec;
272         vlan_mask = item->mask;
273         outer_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].spec;
274         inner_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].spec;
275
276         /*
277          * Copy the rte_flow_item for vlan into hdr_field using Vlan
278          * header fields
279          */
280         if (vlan_spec) {
281                 vlan_tag = ntohs(vlan_spec->tci);
282                 priority = htons(vlan_tag >> 13);
283                 vlan_tag &= 0xfff;
284                 vlan_tag = htons(vlan_tag);
285
286                 hdr_field[idx].size = sizeof(priority);
287                 memcpy(hdr_field[idx++].spec, &priority, sizeof(priority));
288                 hdr_field[idx].size = sizeof(vlan_tag);
289                 memcpy(hdr_field[idx++].spec, &vlan_tag, sizeof(vlan_tag));
290                 hdr_field[idx].size = sizeof(vlan_spec->inner_type);
291                 memcpy(hdr_field[idx++].spec, &vlan_spec->inner_type,
292                        sizeof(vlan_spec->inner_type));
293         } else {
294                 idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
295         }
296
297         if (vlan_mask) {
298                 vlan_tag = ntohs(vlan_mask->tci);
299                 priority = htons(vlan_tag >> 13);
300                 vlan_tag &= 0xfff;
301                 vlan_tag = htons(vlan_tag);
302
303                 memcpy(hdr_field[mdx++].mask, &priority, sizeof(priority));
304                 memcpy(hdr_field[mdx++].mask, &vlan_tag, sizeof(vlan_tag));
305                 memcpy(hdr_field[mdx++].mask, &vlan_mask->inner_type,
306                        sizeof(vlan_mask->inner_type));
307         }
308         /* Set the vlan index to new incremented value */
309         *vlan_idx = idx;
310
311         /* Get the outer tag and inner tag counts */
312         ulp_util_field_int_read(outer_tag_buffer, &outer_vtag_num);
313         ulp_util_field_int_read(inner_tag_buffer, &inner_vtag_num);
314
315         /* Update the hdr_bitmap of the vlans */
316         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
317             !ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) {
318                 /* Set the outer vlan bit and update the vlan tag num */
319                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN);
320                 outer_vtag_num++;
321                 ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
322                 hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
323                                                         sizeof(uint32_t);
324         } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
325                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
326                                     BNXT_ULP_HDR_BIT_OO_VLAN) &&
327                    !ULP_BITMAP_ISSET(hdr_bitmap->bits,
328                                      BNXT_ULP_HDR_BIT_OI_VLAN)) {
329                 /* Set the outer vlan bit and update the vlan tag num */
330                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OI_VLAN);
331                 outer_vtag_num++;
332                 ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
333                 hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
334                                                             sizeof(uint32_t);
335         } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
336                                     BNXT_ULP_HDR_BIT_O_ETH) &&
337                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
338                                     BNXT_ULP_HDR_BIT_OO_VLAN) &&
339                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
340                                     BNXT_ULP_HDR_BIT_OI_VLAN) &&
341                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
342                                     BNXT_ULP_HDR_BIT_I_ETH) &&
343                    !ULP_BITMAP_ISSET(hdr_bitmap->bits,
344                                      BNXT_ULP_HDR_BIT_IO_VLAN)) {
345                 /* Set the inner vlan bit and update the vlan tag num */
346                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_IO_VLAN);
347                 inner_vtag_num++;
348                 ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
349                 hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
350                                                             sizeof(uint32_t);
351         } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
352                                     BNXT_ULP_HDR_BIT_O_ETH) &&
353                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
354                                     BNXT_ULP_HDR_BIT_OO_VLAN) &&
355                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
356                                     BNXT_ULP_HDR_BIT_OI_VLAN) &&
357                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
358                                     BNXT_ULP_HDR_BIT_I_ETH) &&
359                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
360                                     BNXT_ULP_HDR_BIT_IO_VLAN) &&
361                    !ULP_BITMAP_ISSET(hdr_bitmap->bits,
362                                      BNXT_ULP_HDR_BIT_II_VLAN)) {
363                 /* Set the inner vlan bit and update the vlan tag num */
364                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_II_VLAN);
365                 inner_vtag_num++;
366                 ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
367                 hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
368                                                             sizeof(uint32_t);
369         } else {
370                 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
371                 return BNXT_TF_RC_ERROR;
372         }
373         return BNXT_TF_RC_SUCCESS;
374 }
375
376 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
377 int32_t
378 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
379                          struct ulp_rte_hdr_bitmap *hdr_bitmap,
380                          struct ulp_rte_hdr_field *hdr_field,
381                          uint32_t *field_idx,
382                          uint32_t *vlan_idx __rte_unused)
383 {
384         const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
385         uint32_t idx = *field_idx;
386         uint32_t mdx = *field_idx;
387
388         ipv4_spec = item->spec;
389         ipv4_mask = item->mask;
390
391         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
392                 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
393                 return BNXT_TF_RC_ERROR;
394         }
395
396         /*
397          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
398          * header fields
399          */
400         if (ipv4_spec) {
401                 hdr_field[idx].size = sizeof(ipv4_spec->hdr.version_ihl);
402                 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.version_ihl,
403                        sizeof(ipv4_spec->hdr.version_ihl));
404                 hdr_field[idx].size = sizeof(ipv4_spec->hdr.type_of_service);
405                 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.type_of_service,
406                        sizeof(ipv4_spec->hdr.type_of_service));
407                 hdr_field[idx].size = sizeof(ipv4_spec->hdr.total_length);
408                 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.total_length,
409                        sizeof(ipv4_spec->hdr.total_length));
410                 hdr_field[idx].size = sizeof(ipv4_spec->hdr.packet_id);
411                 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.packet_id,
412                        sizeof(ipv4_spec->hdr.packet_id));
413                 hdr_field[idx].size = sizeof(ipv4_spec->hdr.fragment_offset);
414                 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.fragment_offset,
415                        sizeof(ipv4_spec->hdr.fragment_offset));
416                 hdr_field[idx].size = sizeof(ipv4_spec->hdr.time_to_live);
417                 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.time_to_live,
418                        sizeof(ipv4_spec->hdr.time_to_live));
419                 hdr_field[idx].size = sizeof(ipv4_spec->hdr.next_proto_id);
420                 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.next_proto_id,
421                        sizeof(ipv4_spec->hdr.next_proto_id));
422                 hdr_field[idx].size = sizeof(ipv4_spec->hdr.hdr_checksum);
423                 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.hdr_checksum,
424                        sizeof(ipv4_spec->hdr.hdr_checksum));
425                 hdr_field[idx].size = sizeof(ipv4_spec->hdr.src_addr);
426                 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.src_addr,
427                        sizeof(ipv4_spec->hdr.src_addr));
428                 hdr_field[idx].size = sizeof(ipv4_spec->hdr.dst_addr);
429                 memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.dst_addr,
430                        sizeof(ipv4_spec->hdr.dst_addr));
431         } else {
432                 idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
433         }
434
435         if (ipv4_mask) {
436                 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.version_ihl,
437                        sizeof(ipv4_mask->hdr.version_ihl));
438                 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.type_of_service,
439                        sizeof(ipv4_mask->hdr.type_of_service));
440                 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.total_length,
441                        sizeof(ipv4_mask->hdr.total_length));
442                 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.packet_id,
443                        sizeof(ipv4_mask->hdr.packet_id));
444                 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.fragment_offset,
445                        sizeof(ipv4_mask->hdr.fragment_offset));
446                 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.time_to_live,
447                        sizeof(ipv4_mask->hdr.time_to_live));
448                 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.next_proto_id,
449                        sizeof(ipv4_mask->hdr.next_proto_id));
450                 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.hdr_checksum,
451                        sizeof(ipv4_mask->hdr.hdr_checksum));
452                 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.src_addr,
453                        sizeof(ipv4_mask->hdr.src_addr));
454                 memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.dst_addr,
455                        sizeof(ipv4_mask->hdr.dst_addr));
456         }
457         *field_idx = idx; /* Number of ipv4 header elements */
458
459         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
460         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
461             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
462             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
463                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
464                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
465         } else {
466                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
467                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
468         }
469         return BNXT_TF_RC_SUCCESS;
470 }
471
472 /* Function to handle the parsing of RTE Flow item IPV6 Header */
473 int32_t
474 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
475                          struct ulp_rte_hdr_bitmap *hdr_bitmap,
476                          struct ulp_rte_hdr_field *hdr_field,
477                          uint32_t *field_idx,
478                          uint32_t *vlan_idx __rte_unused)
479 {
480         const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
481         uint32_t idx = *field_idx;
482         uint32_t mdx = *field_idx;
483
484         ipv6_spec = item->spec;
485         ipv6_mask = item->mask;
486
487         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
488                 BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
489                 return BNXT_TF_RC_ERROR;
490         }
491
492         /*
493          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
494          * header fields
495          */
496         if (ipv6_spec) {
497                 hdr_field[idx].size = sizeof(ipv6_spec->hdr.vtc_flow);
498                 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.vtc_flow,
499                        sizeof(ipv6_spec->hdr.vtc_flow));
500                 hdr_field[idx].size = sizeof(ipv6_spec->hdr.payload_len);
501                 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.payload_len,
502                        sizeof(ipv6_spec->hdr.payload_len));
503                 hdr_field[idx].size = sizeof(ipv6_spec->hdr.proto);
504                 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.proto,
505                        sizeof(ipv6_spec->hdr.proto));
506                 hdr_field[idx].size = sizeof(ipv6_spec->hdr.hop_limits);
507                 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.hop_limits,
508                        sizeof(ipv6_spec->hdr.hop_limits));
509                 hdr_field[idx].size = sizeof(ipv6_spec->hdr.src_addr);
510                 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.src_addr,
511                        sizeof(ipv6_spec->hdr.src_addr));
512                 hdr_field[idx].size = sizeof(ipv6_spec->hdr.dst_addr);
513                 memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.dst_addr,
514                        sizeof(ipv6_spec->hdr.dst_addr));
515         } else {
516                 idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
517         }
518
519         if (ipv6_mask) {
520                 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.vtc_flow,
521                        sizeof(ipv6_mask->hdr.vtc_flow));
522                 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.payload_len,
523                        sizeof(ipv6_mask->hdr.payload_len));
524                 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.proto,
525                        sizeof(ipv6_mask->hdr.proto));
526                 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.hop_limits,
527                        sizeof(ipv6_mask->hdr.hop_limits));
528                 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.src_addr,
529                        sizeof(ipv6_mask->hdr.src_addr));
530                 memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.dst_addr,
531                        sizeof(ipv6_mask->hdr.dst_addr));
532         }
533         *field_idx = idx; /* add number of ipv6 header elements */
534
535         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
536         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
537             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
538             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
539                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
540                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
541         } else {
542                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
543                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
544         }
545         return BNXT_TF_RC_SUCCESS;
546 }
547
548 /* Function to handle the parsing of RTE Flow item UDP Header. */
549 int32_t
550 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
551                         struct ulp_rte_hdr_bitmap *hdr_bitmap,
552                         struct ulp_rte_hdr_field *hdr_field,
553                         uint32_t *field_idx,
554                         uint32_t *vlan_idx __rte_unused)
555 {
556         const struct rte_flow_item_udp *udp_spec, *udp_mask;
557         uint32_t idx = *field_idx;
558         uint32_t mdx = *field_idx;
559
560         udp_spec = item->spec;
561         udp_mask = item->mask;
562
563         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
564                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
565                 return BNXT_TF_RC_ERROR;
566         }
567
568         /*
569          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
570          * header fields
571          */
572         if (udp_spec) {
573                 hdr_field[idx].size = sizeof(udp_spec->hdr.src_port);
574                 memcpy(hdr_field[idx++].spec, &udp_spec->hdr.src_port,
575                        sizeof(udp_spec->hdr.src_port));
576                 hdr_field[idx].size = sizeof(udp_spec->hdr.dst_port);
577                 memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dst_port,
578                        sizeof(udp_spec->hdr.dst_port));
579                 hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_len);
580                 memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_len,
581                        sizeof(udp_spec->hdr.dgram_len));
582                 hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_cksum);
583                 memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_cksum,
584                        sizeof(udp_spec->hdr.dgram_cksum));
585         } else {
586                 idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
587         }
588
589         if (udp_mask) {
590                 memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.src_port,
591                        sizeof(udp_mask->hdr.src_port));
592                 memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dst_port,
593                        sizeof(udp_mask->hdr.dst_port));
594                 memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_len,
595                        sizeof(udp_mask->hdr.dgram_len));
596                 memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_cksum,
597                        sizeof(udp_mask->hdr.dgram_cksum));
598         }
599         *field_idx = idx; /* Add number of UDP header elements */
600
601         /* Set the udp header bitmap and computed l4 header bitmaps */
602         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
603             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
604             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
605                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
606                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
607         } else {
608                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
609                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
610         }
611         return BNXT_TF_RC_SUCCESS;
612 }
613
/* Function to handle the parsing of RTE Flow item TCP Header.
 *
 * Copies the TCP spec/mask fields of 'item' into consecutive
 * hdr_field[] slots starting at *field_idx and updates the header
 * bitmap to record an outer or inner TCP/L4 header.
 *
 * Returns BNXT_TF_RC_SUCCESS, or BNXT_TF_RC_ERROR if an inner L4
 * header was already parsed (only two L4 levels are supported).
 */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_hdr_bitmap *hdr_bitmap,
			struct ulp_rte_hdr_field *hdr_field,
			uint32_t *field_idx,
			uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	uint32_t idx = *field_idx;	/* cursor for the spec copies */
	uint32_t mdx = *field_idx;	/* independent cursor for the mask copies */

	tcp_spec = item->spec;
	tcp_mask = item->mask;

	/* A third L4 header (beyond outer + inner) is not supported. */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
		BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for tcp into hdr_field using tcp
	 * header fields.  The field order below must match the TCP
	 * field layout expected by the ulp template tables.
	 */
	if (tcp_spec) {
		hdr_field[idx].size = sizeof(tcp_spec->hdr.src_port);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.src_port,
		       sizeof(tcp_spec->hdr.src_port));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.dst_port);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.dst_port,
		       sizeof(tcp_spec->hdr.dst_port));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.sent_seq);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.sent_seq,
		       sizeof(tcp_spec->hdr.sent_seq));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.recv_ack);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.recv_ack,
		       sizeof(tcp_spec->hdr.recv_ack));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.data_off);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.data_off,
		       sizeof(tcp_spec->hdr.data_off));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_flags);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_flags,
		       sizeof(tcp_spec->hdr.tcp_flags));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.rx_win);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.rx_win,
		       sizeof(tcp_spec->hdr.rx_win));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.cksum);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.cksum,
		       sizeof(tcp_spec->hdr.cksum));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_urp);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_urp,
		       sizeof(tcp_spec->hdr.tcp_urp));
	} else {
		/* No spec given: still skip the fixed number of TCP slots. */
		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
	}

	/* Mask copies walk the same slots, in the same field order. */
	if (tcp_mask) {
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.src_port,
		       sizeof(tcp_mask->hdr.src_port));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.dst_port,
		       sizeof(tcp_mask->hdr.dst_port));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.sent_seq,
		       sizeof(tcp_mask->hdr.sent_seq));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.recv_ack,
		       sizeof(tcp_mask->hdr.recv_ack));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.data_off,
		       sizeof(tcp_mask->hdr.data_off));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_flags,
		       sizeof(tcp_mask->hdr.tcp_flags));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.rx_win,
		       sizeof(tcp_mask->hdr.rx_win));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.cksum,
		       sizeof(tcp_mask->hdr.cksum));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_urp,
		       sizeof(tcp_mask->hdr.tcp_urp));
	}
	*field_idx = idx; /* add number of TCP header elements */

	/* Set the tcp header bitmap and computed l4 header bitmaps.
	 * If any outer L4 header was already seen, this TCP header is
	 * the inner one; otherwise it is the outer one.
	 */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
	}
	return BNXT_TF_RC_SUCCESS;
}
704
705 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
706 int32_t
707 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
708                           struct ulp_rte_hdr_bitmap *hdrbitmap,
709                           struct ulp_rte_hdr_field *hdr_field,
710                           uint32_t *field_idx,
711                           uint32_t *vlan_idx __rte_unused)
712 {
713         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
714         uint32_t idx = *field_idx;
715         uint32_t mdx = *field_idx;
716
717         vxlan_spec = item->spec;
718         vxlan_mask = item->mask;
719
720         /*
721          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
722          * header fields
723          */
724         if (vxlan_spec) {
725                 hdr_field[idx].size = sizeof(vxlan_spec->flags);
726                 memcpy(hdr_field[idx++].spec, &vxlan_spec->flags,
727                        sizeof(vxlan_spec->flags));
728                 hdr_field[idx].size = sizeof(vxlan_spec->rsvd0);
729                 memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd0,
730                        sizeof(vxlan_spec->rsvd0));
731                 hdr_field[idx].size = sizeof(vxlan_spec->vni);
732                 memcpy(hdr_field[idx++].spec, &vxlan_spec->vni,
733                        sizeof(vxlan_spec->vni));
734                 hdr_field[idx].size = sizeof(vxlan_spec->rsvd1);
735                 memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd1,
736                        sizeof(vxlan_spec->rsvd1));
737         } else {
738                 idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
739         }
740
741         if (vxlan_mask) {
742                 memcpy(hdr_field[mdx++].mask, &vxlan_mask->flags,
743                        sizeof(vxlan_mask->flags));
744                 memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd0,
745                        sizeof(vxlan_mask->rsvd0));
746                 memcpy(hdr_field[mdx++].mask, &vxlan_mask->vni,
747                        sizeof(vxlan_mask->vni));
748                 memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd1,
749                        sizeof(vxlan_mask->rsvd1));
750         }
751         *field_idx = idx; /* Add number of vxlan header elements */
752
753         /* Update the hdr_bitmap with vxlan */
754         ULP_BITMAP_SET(hdrbitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
755         return BNXT_TF_RC_SUCCESS;
756 }
757
/* Function to handle the parsing of RTE Flow item void Header.
 *
 * RTE_FLOW_ITEM_TYPE_VOID carries no matching data, so this handler
 * intentionally does nothing: no hdr_field slots are consumed and no
 * header bits are set.  It exists so the per-item dispatch table has
 * a valid callback for void items.  Always returns BNXT_TF_RC_SUCCESS.
 */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_hdr_bitmap *hdr_bit __rte_unused,
			 struct ulp_rte_hdr_field *hdr_field __rte_unused,
			 uint32_t *field_idx __rte_unused,
			 uint32_t *vlan_idx __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}