4339032a78cb2c2b7f5e9fbda440c0e67d69d4c6
[dpdk.git] / drivers / net / bnxt / tf_ulp / ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
12 #include "tfp.h"
13
/* Inline helper: read a big-endian uint32 from a byte buffer into host order. */
static inline void ulp_util_field_int_read(uint8_t *buffer,
					   uint32_t *val)
{
	uint32_t be_val;

	memcpy(&be_val, buffer, sizeof(be_val));
	*val = rte_be_to_cpu_32(be_val);
}
23
/* Inline helper: write a host-order uint32 into a byte buffer in big endian. */
static inline void ulp_util_field_int_write(uint8_t *buffer,
					    uint32_t val)
{
	uint32_t be_val = rte_cpu_to_be_32(val);

	memcpy(buffer, &be_val, sizeof(be_val));
}
32
33 /* Utility function to skip the void items. */
34 static inline int32_t
35 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
36 {
37         if (!*item)
38                 return 0;
39         if (increment)
40                 (*item)++;
41         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
42                 (*item)++;
43         if (*item)
44                 return 1;
45         return 0;
46 }
47
48 /*
49  * Function to handle the parsing of RTE Flows and placing
50  * the RTE flow items into the ulp structures.
51  */
52 int32_t
53 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
54                               struct ulp_rte_hdr_bitmap *hdr_bitmap,
55                               struct ulp_rte_hdr_field *hdr_field)
56 {
57         const struct rte_flow_item *item = pattern;
58         uint32_t field_idx = BNXT_ULP_HDR_FIELD_LAST;
59         uint32_t vlan_idx = 0;
60         struct bnxt_ulp_rte_hdr_info *hdr_info;
61
62         /* Parse all the items in the pattern */
63         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
64                 /* get the header information from the flow_hdr_info table */
65                 hdr_info = &ulp_hdr_info[item->type];
66                 if (hdr_info->hdr_type ==
67                     BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
68                         BNXT_TF_DBG(ERR,
69                                     "Truflow parser does not support type %d\n",
70                                     item->type);
71                         return BNXT_TF_RC_PARSE_ERR;
72                 } else if (hdr_info->hdr_type ==
73                            BNXT_ULP_HDR_TYPE_SUPPORTED) {
74                         /* call the registered callback handler */
75                         if (hdr_info->proto_hdr_func) {
76                                 if (hdr_info->proto_hdr_func(item,
77                                                              hdr_bitmap,
78                                                              hdr_field,
79                                                              &field_idx,
80                                                              &vlan_idx) !=
81                                     BNXT_TF_RC_SUCCESS) {
82                                         return BNXT_TF_RC_ERROR;
83                                 }
84                         }
85                 }
86                 item++;
87         }
88         /* update the implied SVIF */
89         (void)ulp_rte_parser_svif_process(hdr_bitmap, hdr_field);
90         return BNXT_TF_RC_SUCCESS;
91 }
92
93 /*
94  * Function to handle the parsing of RTE Flows and placing
95  * the RTE flow actions into the ulp structures.
96  */
97 int32_t
98 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
99                               struct ulp_rte_act_bitmap *act_bitmap,
100                               struct ulp_rte_act_prop *act_prop)
101 {
102         const struct rte_flow_action *action_item = actions;
103         struct bnxt_ulp_rte_act_info *hdr_info;
104
105         /* Parse all the items in the pattern */
106         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
107                 /* get the header information from the flow_hdr_info table */
108                 hdr_info = &ulp_act_info[action_item->type];
109                 if (hdr_info->act_type ==
110                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
111                         BNXT_TF_DBG(ERR,
112                                     "Truflow parser does not support act %u\n",
113                                     action_item->type);
114                         return BNXT_TF_RC_ERROR;
115                 } else if (hdr_info->act_type ==
116                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
117                         /* call the registered callback handler */
118                         if (hdr_info->proto_act_func) {
119                                 if (hdr_info->proto_act_func(action_item,
120                                                              act_bitmap,
121                                                              act_prop) !=
122                                     BNXT_TF_RC_SUCCESS) {
123                                         return BNXT_TF_RC_ERROR;
124                                 }
125                         }
126                 }
127                 action_item++;
128         }
129         return BNXT_TF_RC_SUCCESS;
130 }
131
132 /* Function to handle the parsing of RTE Flow item PF Header. */
133 static int32_t
134 ulp_rte_parser_svif_set(struct ulp_rte_hdr_bitmap *hdr_bitmap,
135                         struct ulp_rte_hdr_field *hdr_field,
136                         enum rte_flow_item_type proto,
137                         uint32_t dir,
138                         uint16_t svif,
139                         uint16_t mask)
140 {
141         uint16_t port_id = svif;
142
143         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF)) {
144                 BNXT_TF_DBG(ERR,
145                             "SVIF already set,"
146                             " multiple sources not supported\n");
147                 return BNXT_TF_RC_ERROR;
148         }
149
150         /* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF. */
151         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF);
152
153         if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
154                 /* perform the conversion from dpdk port to svif */
155                 if (dir == ULP_DIR_EGRESS)
156                         svif = bnxt_get_svif(port_id, true);
157                 else
158                         svif = bnxt_get_svif(port_id, false);
159         }
160
161         memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec,
162                &svif, sizeof(svif));
163         memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask,
164                &mask, sizeof(mask));
165         hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif);
166         return BNXT_TF_RC_SUCCESS;
167 }
168
169 /* Function to handle the parsing of the RTE port id
170  */
171 int32_t
172 ulp_rte_parser_svif_process(struct ulp_rte_hdr_bitmap   *hdr_bitmap,
173                             struct ulp_rte_hdr_field    *hdr_field)
174 {
175         uint16_t port_id = 0;
176         uint32_t dir = 0;
177         uint8_t *buffer;
178         uint16_t svif_mask = 0xFFFF;
179
180         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF))
181                 return BNXT_TF_RC_SUCCESS;
182
183         /* SVIF not set. So get the port id and direction */
184         buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec;
185         memcpy(&port_id, buffer, sizeof(port_id));
186         memcpy(&dir, buffer + sizeof(port_id), sizeof(dir));
187         memset(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, 0,
188                RTE_PARSER_FLOW_HDR_FIELD_SIZE);
189
190         return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
191                                        RTE_FLOW_ITEM_TYPE_PORT_ID,
192                                        dir, port_id, svif_mask);
193 }
194
195 /* Function to handle the parsing of RTE Flow item PF Header. */
196 int32_t
197 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
198                        struct ulp_rte_hdr_bitmap *hdr_bitmap,
199                        struct ulp_rte_hdr_field *hdr_field,
200                        uint32_t *field_idx __rte_unused,
201                        uint32_t *vlan_idx __rte_unused)
202 {
203         uint16_t port_id = 0;
204         uint32_t dir = 0;
205         uint8_t *buffer;
206         uint16_t svif_mask = 0xFFFF;
207
208         buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec;
209         memcpy(&port_id, buffer, sizeof(port_id));
210         memcpy(&dir, buffer + sizeof(port_id), sizeof(dir));
211         memset(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec, 0,
212                RTE_PARSER_FLOW_HDR_FIELD_SIZE);
213
214         return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
215                                        item->type,
216                                        dir, port_id, svif_mask);
217 }
218
219 /* Function to handle the parsing of RTE Flow item VF Header. */
220 int32_t
221 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
222                        struct ulp_rte_hdr_bitmap *hdr_bitmap,
223                        struct ulp_rte_hdr_field  *hdr_field,
224                        uint32_t *field_idx __rte_unused,
225                        uint32_t *vlan_idx __rte_unused)
226 {
227         const struct rte_flow_item_vf *vf_spec, *vf_mask;
228         uint16_t svif = 0, mask = 0;
229
230         vf_spec = item->spec;
231         vf_mask = item->mask;
232
233         /*
234          * Copy the rte_flow_item for eth into hdr_field using ethernet
235          * header fields.
236          */
237         if (vf_spec)
238                 svif = (uint16_t)vf_spec->id;
239         if (vf_mask)
240                 mask = (uint16_t)vf_mask->id;
241
242         return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
243                                        item->type, 0, svif, mask);
244 }
245
246 /* Function to handle the parsing of RTE Flow item port id  Header. */
247 int32_t
248 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
249                             struct ulp_rte_hdr_bitmap *hdr_bitmap,
250                             struct ulp_rte_hdr_field *hdr_field,
251                             uint32_t *field_idx __rte_unused,
252                             uint32_t *vlan_idx __rte_unused)
253 {
254         const struct rte_flow_item_port_id *port_spec, *port_mask;
255         uint16_t svif = 0, mask = 0;
256         uint32_t dir;
257         uint8_t *buffer;
258
259         port_spec = item->spec;
260         port_mask = item->mask;
261
262         /*
263          * Copy the rte_flow_item for Port into hdr_field using port id
264          * header fields.
265          */
266         if (port_spec)
267                 svif = (uint16_t)port_spec->id;
268         if (port_mask)
269                 mask = (uint16_t)port_mask->id;
270
271         buffer = hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec;
272         memcpy(&dir, buffer + sizeof(uint16_t), sizeof(uint16_t));
273
274         return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
275                                        item->type, dir, svif, mask);
276 }
277
278 /* Function to handle the parsing of RTE Flow item phy port Header. */
279 int32_t
280 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
281                              struct ulp_rte_hdr_bitmap *hdr_bitmap,
282                              struct ulp_rte_hdr_field *hdr_field,
283                              uint32_t *field_idx __rte_unused,
284                              uint32_t *vlan_idx __rte_unused)
285 {
286         const struct rte_flow_item_phy_port *port_spec, *port_mask;
287         uint32_t svif = 0, mask = 0;
288
289         port_spec = item->spec;
290         port_mask = item->mask;
291
292         /* Copy the rte_flow_item for phy port into hdr_field */
293         if (port_spec)
294                 svif = port_spec->index;
295         if (port_mask)
296                 mask = port_mask->index;
297
298         return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
299                                        item->type, 0, svif, mask);
300 }
301
/*
 * Function to handle the parsing of RTE Flow item Ethernet Header.
 * Copies dst mac, src mac and ethertype into consecutive hdr_field
 * slots starting at *field_idx, then reserves the vlan slots and
 * records this header as outer (and inner if an outer eth was seen).
 */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_hdr_bitmap *hdr_bitmap,
			struct ulp_rte_hdr_field *hdr_field,
			uint32_t *field_idx,
			uint32_t *vlan_idx)
{
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	uint32_t idx = *field_idx;	/* spec slot cursor */
	uint32_t mdx = *field_idx;	/* mask slot cursor (same base) */
	uint64_t set_flag = 0;

	eth_spec = item->spec;
	eth_mask = item->mask;

	/*
	 * Copy the rte_flow_item for eth into hdr_field using ethernet
	 * header fields
	 */
	if (eth_spec) {
		hdr_field[idx].size = sizeof(eth_spec->dst.addr_bytes);
		memcpy(hdr_field[idx++].spec, eth_spec->dst.addr_bytes,
		       sizeof(eth_spec->dst.addr_bytes));
		hdr_field[idx].size = sizeof(eth_spec->src.addr_bytes);
		memcpy(hdr_field[idx++].spec, eth_spec->src.addr_bytes,
		       sizeof(eth_spec->src.addr_bytes));
		hdr_field[idx].size = sizeof(eth_spec->type);
		memcpy(hdr_field[idx++].spec, &eth_spec->type,
		       sizeof(eth_spec->type));
	} else {
		/* No spec: still skip the eth slots so indices stay aligned */
		idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
	}

	/*
	 * NOTE(review): sizes are only written in the spec path; a
	 * mask-only item leaves hdr_field[].size untouched — confirm
	 * that callers never supply a mask without a spec.
	 */
	if (eth_mask) {
		memcpy(hdr_field[mdx++].mask, eth_mask->dst.addr_bytes,
		       sizeof(eth_mask->dst.addr_bytes));
		memcpy(hdr_field[mdx++].mask, eth_mask->src.addr_bytes,
		       sizeof(eth_mask->src.addr_bytes));
		memcpy(hdr_field[mdx++].mask, &eth_mask->type,
		       sizeof(eth_mask->type));
	}
	/* Add number of vlan header elements */
	*field_idx = idx + BNXT_ULP_PROTO_HDR_VLAN_NUM;
	*vlan_idx = idx;

	/* Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH:
	 * a second eth header (outer bit already set) is the inner one.
	 */
	set_flag = ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);
	if (set_flag)
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);
	else
		ULP_BITMAP_RESET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);

	/* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
	ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);

	return BNXT_TF_RC_SUCCESS;
}
360
361 /* Function to handle the parsing of RTE Flow item Vlan Header. */
362 int32_t
363 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
364                          struct ulp_rte_hdr_bitmap *hdr_bitmap,
365                          struct ulp_rte_hdr_field *hdr_field,
366                          uint32_t *field_idx __rte_unused,
367                          uint32_t *vlan_idx)
368 {
369         const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
370         uint32_t idx = *vlan_idx;
371         uint32_t mdx = *vlan_idx;
372         uint16_t vlan_tag, priority;
373         uint32_t outer_vtag_num = 0, inner_vtag_num = 0;
374         uint8_t *outer_tag_buffer;
375         uint8_t *inner_tag_buffer;
376
377         vlan_spec = item->spec;
378         vlan_mask = item->mask;
379         outer_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].spec;
380         inner_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].spec;
381
382         /*
383          * Copy the rte_flow_item for vlan into hdr_field using Vlan
384          * header fields
385          */
386         if (vlan_spec) {
387                 vlan_tag = ntohs(vlan_spec->tci);
388                 priority = htons(vlan_tag >> 13);
389                 vlan_tag &= 0xfff;
390                 vlan_tag = htons(vlan_tag);
391
392                 hdr_field[idx].size = sizeof(priority);
393                 memcpy(hdr_field[idx++].spec, &priority, sizeof(priority));
394                 hdr_field[idx].size = sizeof(vlan_tag);
395                 memcpy(hdr_field[idx++].spec, &vlan_tag, sizeof(vlan_tag));
396                 hdr_field[idx].size = sizeof(vlan_spec->inner_type);
397                 memcpy(hdr_field[idx++].spec, &vlan_spec->inner_type,
398                        sizeof(vlan_spec->inner_type));
399         } else {
400                 idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
401         }
402
403         if (vlan_mask) {
404                 vlan_tag = ntohs(vlan_mask->tci);
405                 priority = htons(vlan_tag >> 13);
406                 vlan_tag &= 0xfff;
407                 vlan_tag = htons(vlan_tag);
408
409                 memcpy(hdr_field[mdx++].mask, &priority, sizeof(priority));
410                 memcpy(hdr_field[mdx++].mask, &vlan_tag, sizeof(vlan_tag));
411                 memcpy(hdr_field[mdx++].mask, &vlan_mask->inner_type,
412                        sizeof(vlan_mask->inner_type));
413         }
414         /* Set the vlan index to new incremented value */
415         *vlan_idx = idx;
416
417         /* Get the outer tag and inner tag counts */
418         ulp_util_field_int_read(outer_tag_buffer, &outer_vtag_num);
419         ulp_util_field_int_read(inner_tag_buffer, &inner_vtag_num);
420
421         /* Update the hdr_bitmap of the vlans */
422         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
423             !ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) {
424                 /* Set the outer vlan bit and update the vlan tag num */
425                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN);
426                 outer_vtag_num++;
427                 ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
428                 hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
429                                                         sizeof(uint32_t);
430         } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
431                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
432                                     BNXT_ULP_HDR_BIT_OO_VLAN) &&
433                    !ULP_BITMAP_ISSET(hdr_bitmap->bits,
434                                      BNXT_ULP_HDR_BIT_OI_VLAN)) {
435                 /* Set the outer vlan bit and update the vlan tag num */
436                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OI_VLAN);
437                 outer_vtag_num++;
438                 ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
439                 hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
440                                                             sizeof(uint32_t);
441         } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
442                                     BNXT_ULP_HDR_BIT_O_ETH) &&
443                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
444                                     BNXT_ULP_HDR_BIT_OO_VLAN) &&
445                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
446                                     BNXT_ULP_HDR_BIT_OI_VLAN) &&
447                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
448                                     BNXT_ULP_HDR_BIT_I_ETH) &&
449                    !ULP_BITMAP_ISSET(hdr_bitmap->bits,
450                                      BNXT_ULP_HDR_BIT_IO_VLAN)) {
451                 /* Set the inner vlan bit and update the vlan tag num */
452                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_IO_VLAN);
453                 inner_vtag_num++;
454                 ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
455                 hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
456                                                             sizeof(uint32_t);
457         } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
458                                     BNXT_ULP_HDR_BIT_O_ETH) &&
459                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
460                                     BNXT_ULP_HDR_BIT_OO_VLAN) &&
461                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
462                                     BNXT_ULP_HDR_BIT_OI_VLAN) &&
463                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
464                                     BNXT_ULP_HDR_BIT_I_ETH) &&
465                    ULP_BITMAP_ISSET(hdr_bitmap->bits,
466                                     BNXT_ULP_HDR_BIT_IO_VLAN) &&
467                    !ULP_BITMAP_ISSET(hdr_bitmap->bits,
468                                      BNXT_ULP_HDR_BIT_II_VLAN)) {
469                 /* Set the inner vlan bit and update the vlan tag num */
470                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_II_VLAN);
471                 inner_vtag_num++;
472                 ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
473                 hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
474                                                             sizeof(uint32_t);
475         } else {
476                 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
477                 return BNXT_TF_RC_ERROR;
478         }
479         return BNXT_TF_RC_SUCCESS;
480 }
481
/*
 * Function to handle the parsing of RTE Flow item IPV4 Header.
 * Copies every ipv4 header field (spec and mask) into consecutive
 * hdr_field slots starting at *field_idx, then marks this header as
 * outer L3 or, if an outer L3 is already present, as inner L3.
 * At most two L3 headers are supported per flow.
 */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_hdr_bitmap *hdr_bitmap,
			 struct ulp_rte_hdr_field *hdr_field,
			 uint32_t *field_idx,
			 uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	uint32_t idx = *field_idx;	/* spec slot cursor */
	uint32_t mdx = *field_idx;	/* mask slot cursor (same base) */

	ipv4_spec = item->spec;
	ipv4_mask = item->mask;

	/* Inner L3 already seen: a third L3 header cannot be parsed */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
		BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
	 * header fields
	 */
	if (ipv4_spec) {
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.version_ihl);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.version_ihl,
		       sizeof(ipv4_spec->hdr.version_ihl));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.type_of_service);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.type_of_service,
		       sizeof(ipv4_spec->hdr.type_of_service));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.total_length);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.total_length,
		       sizeof(ipv4_spec->hdr.total_length));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.packet_id);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.packet_id,
		       sizeof(ipv4_spec->hdr.packet_id));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.fragment_offset);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.fragment_offset,
		       sizeof(ipv4_spec->hdr.fragment_offset));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.time_to_live);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.time_to_live,
		       sizeof(ipv4_spec->hdr.time_to_live));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.next_proto_id);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.next_proto_id,
		       sizeof(ipv4_spec->hdr.next_proto_id));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.hdr_checksum);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.hdr_checksum,
		       sizeof(ipv4_spec->hdr.hdr_checksum));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.src_addr);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.src_addr,
		       sizeof(ipv4_spec->hdr.src_addr));
		hdr_field[idx].size = sizeof(ipv4_spec->hdr.dst_addr);
		memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.dst_addr,
		       sizeof(ipv4_spec->hdr.dst_addr));
	} else {
		/* No spec: skip the ipv4 slots so indices stay aligned */
		idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
	}

	if (ipv4_mask) {
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.version_ihl,
		       sizeof(ipv4_mask->hdr.version_ihl));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.type_of_service,
		       sizeof(ipv4_mask->hdr.type_of_service));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.total_length,
		       sizeof(ipv4_mask->hdr.total_length));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.packet_id,
		       sizeof(ipv4_mask->hdr.packet_id));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.fragment_offset,
		       sizeof(ipv4_mask->hdr.fragment_offset));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.time_to_live,
		       sizeof(ipv4_mask->hdr.time_to_live));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.next_proto_id,
		       sizeof(ipv4_mask->hdr.next_proto_id));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.hdr_checksum,
		       sizeof(ipv4_mask->hdr.hdr_checksum));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.src_addr,
		       sizeof(ipv4_mask->hdr.src_addr));
		memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.dst_addr,
		       sizeof(ipv4_mask->hdr.dst_addr));
	}
	*field_idx = idx; /* Number of ipv4 header elements */

	/* Set the ipv4 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
	}
	return BNXT_TF_RC_SUCCESS;
}
577
/*
 * Function to handle the parsing of RTE Flow item IPV6 Header.
 * Copies every ipv6 header field (spec and mask) into consecutive
 * hdr_field slots starting at *field_idx, then marks this header as
 * outer L3 or, if an outer L3 is already present, as inner L3.
 * At most two L3 headers are supported per flow.
 */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
			 struct ulp_rte_hdr_bitmap *hdr_bitmap,
			 struct ulp_rte_hdr_field *hdr_field,
			 uint32_t *field_idx,
			 uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	uint32_t idx = *field_idx;	/* spec slot cursor */
	uint32_t mdx = *field_idx;	/* mask slot cursor (same base) */

	ipv6_spec = item->spec;
	ipv6_mask = item->mask;

	/* Inner L3 already seen: a third L3 header cannot be parsed */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
		BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
	 * header fields
	 */
	if (ipv6_spec) {
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.vtc_flow);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.vtc_flow,
		       sizeof(ipv6_spec->hdr.vtc_flow));
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.payload_len);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.payload_len,
		       sizeof(ipv6_spec->hdr.payload_len));
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.proto);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.proto,
		       sizeof(ipv6_spec->hdr.proto));
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.hop_limits);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.hop_limits,
		       sizeof(ipv6_spec->hdr.hop_limits));
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.src_addr);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.src_addr,
		       sizeof(ipv6_spec->hdr.src_addr));
		hdr_field[idx].size = sizeof(ipv6_spec->hdr.dst_addr);
		memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.dst_addr,
		       sizeof(ipv6_spec->hdr.dst_addr));
	} else {
		/* No spec: skip the ipv6 slots so indices stay aligned */
		idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
	}

	if (ipv6_mask) {
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.vtc_flow,
		       sizeof(ipv6_mask->hdr.vtc_flow));
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.payload_len,
		       sizeof(ipv6_mask->hdr.payload_len));
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.proto,
		       sizeof(ipv6_mask->hdr.proto));
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.hop_limits,
		       sizeof(ipv6_mask->hdr.hop_limits));
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.src_addr,
		       sizeof(ipv6_mask->hdr.src_addr));
		memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.dst_addr,
		       sizeof(ipv6_mask->hdr.dst_addr));
	}
	*field_idx = idx; /* add number of ipv6 header elements */

	/* Set the ipv6 header bitmap and computed l3 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
	}
	return BNXT_TF_RC_SUCCESS;
}
653
/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_hdr_bitmap *hdr_bitmap,
			struct ulp_rte_hdr_field *hdr_field,
			uint32_t *field_idx,
			uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	uint32_t idx = *field_idx;	/* write cursor for spec entries */
	uint32_t mdx = *field_idx;	/* write cursor for mask entries */

	udp_spec = item->spec;
	udp_mask = item->mask;

	/* Only outer and inner L4 headers are supported, not a third one. */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for UDP into hdr_field using UDP
	 * header fields
	 */
	if (udp_spec) {
		hdr_field[idx].size = sizeof(udp_spec->hdr.src_port);
		memcpy(hdr_field[idx++].spec, &udp_spec->hdr.src_port,
		       sizeof(udp_spec->hdr.src_port));
		hdr_field[idx].size = sizeof(udp_spec->hdr.dst_port);
		memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dst_port,
		       sizeof(udp_spec->hdr.dst_port));
		hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_len);
		memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_len,
		       sizeof(udp_spec->hdr.dgram_len));
		hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_cksum);
		memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_cksum,
		       sizeof(udp_spec->hdr.dgram_cksum));
	} else {
		/* No spec given; still reserve the UDP field slots. */
		idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
	}

	if (udp_mask) {
		/* NOTE(review): the mask path does not set hdr_field[].size;
		 * presumably the spec path (or template defaults) provides it
		 * — confirm the mask-without-spec case.
		 */
		memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.src_port,
		       sizeof(udp_mask->hdr.src_port));
		memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dst_port,
		       sizeof(udp_mask->hdr.dst_port));
		memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_len,
		       sizeof(udp_mask->hdr.dgram_len));
		memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_cksum,
		       sizeof(udp_mask->hdr.dgram_cksum));
	}
	*field_idx = idx; /* Add number of UDP header elements */

	/* Set the udp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		/* An outer L4 already exists; this is the inner UDP header. */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
	}
	return BNXT_TF_RC_SUCCESS;
}
719
/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_hdr_bitmap *hdr_bitmap,
			struct ulp_rte_hdr_field *hdr_field,
			uint32_t *field_idx,
			uint32_t *vlan_idx __rte_unused)
{
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	uint32_t idx = *field_idx;	/* write cursor for spec entries */
	uint32_t mdx = *field_idx;	/* write cursor for mask entries */

	tcp_spec = item->spec;
	tcp_mask = item->mask;

	/* Only outer and inner L4 headers are supported, not a third one. */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
		BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for TCP into hdr_field using TCP
	 * header fields
	 */
	if (tcp_spec) {
		hdr_field[idx].size = sizeof(tcp_spec->hdr.src_port);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.src_port,
		       sizeof(tcp_spec->hdr.src_port));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.dst_port);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.dst_port,
		       sizeof(tcp_spec->hdr.dst_port));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.sent_seq);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.sent_seq,
		       sizeof(tcp_spec->hdr.sent_seq));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.recv_ack);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.recv_ack,
		       sizeof(tcp_spec->hdr.recv_ack));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.data_off);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.data_off,
		       sizeof(tcp_spec->hdr.data_off));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_flags);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_flags,
		       sizeof(tcp_spec->hdr.tcp_flags));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.rx_win);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.rx_win,
		       sizeof(tcp_spec->hdr.rx_win));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.cksum);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.cksum,
		       sizeof(tcp_spec->hdr.cksum));
		hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_urp);
		memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_urp,
		       sizeof(tcp_spec->hdr.tcp_urp));
	} else {
		/* No spec given; still reserve the TCP field slots. */
		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
	}

	if (tcp_mask) {
		/* NOTE(review): the mask path does not set hdr_field[].size;
		 * presumably the spec path (or template defaults) provides it
		 * — confirm the mask-without-spec case.
		 */
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.src_port,
		       sizeof(tcp_mask->hdr.src_port));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.dst_port,
		       sizeof(tcp_mask->hdr.dst_port));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.sent_seq,
		       sizeof(tcp_mask->hdr.sent_seq));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.recv_ack,
		       sizeof(tcp_mask->hdr.recv_ack));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.data_off,
		       sizeof(tcp_mask->hdr.data_off));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_flags,
		       sizeof(tcp_mask->hdr.tcp_flags));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.rx_win,
		       sizeof(tcp_mask->hdr.rx_win));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.cksum,
		       sizeof(tcp_mask->hdr.cksum));
		memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_urp,
		       sizeof(tcp_mask->hdr.tcp_urp));
	}
	*field_idx = idx; /* add number of TCP header elements */

	/* Set the tcp header bitmap and computed l4 header bitmaps */
	if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		/* An outer L4 already exists; this is the inner TCP header. */
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
	}
	return BNXT_TF_RC_SUCCESS;
}
810
811 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
812 int32_t
813 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
814                           struct ulp_rte_hdr_bitmap *hdrbitmap,
815                           struct ulp_rte_hdr_field *hdr_field,
816                           uint32_t *field_idx,
817                           uint32_t *vlan_idx __rte_unused)
818 {
819         const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
820         uint32_t idx = *field_idx;
821         uint32_t mdx = *field_idx;
822
823         vxlan_spec = item->spec;
824         vxlan_mask = item->mask;
825
826         /*
827          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
828          * header fields
829          */
830         if (vxlan_spec) {
831                 hdr_field[idx].size = sizeof(vxlan_spec->flags);
832                 memcpy(hdr_field[idx++].spec, &vxlan_spec->flags,
833                        sizeof(vxlan_spec->flags));
834                 hdr_field[idx].size = sizeof(vxlan_spec->rsvd0);
835                 memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd0,
836                        sizeof(vxlan_spec->rsvd0));
837                 hdr_field[idx].size = sizeof(vxlan_spec->vni);
838                 memcpy(hdr_field[idx++].spec, &vxlan_spec->vni,
839                        sizeof(vxlan_spec->vni));
840                 hdr_field[idx].size = sizeof(vxlan_spec->rsvd1);
841                 memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd1,
842                        sizeof(vxlan_spec->rsvd1));
843         } else {
844                 idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
845         }
846
847         if (vxlan_mask) {
848                 memcpy(hdr_field[mdx++].mask, &vxlan_mask->flags,
849                        sizeof(vxlan_mask->flags));
850                 memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd0,
851                        sizeof(vxlan_mask->rsvd0));
852                 memcpy(hdr_field[mdx++].mask, &vxlan_mask->vni,
853                        sizeof(vxlan_mask->vni));
854                 memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd1,
855                        sizeof(vxlan_mask->rsvd1));
856         }
857         *field_idx = idx; /* Add number of vxlan header elements */
858
859         /* Update the hdr_bitmap with vxlan */
860         ULP_BITMAP_SET(hdrbitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
861         return BNXT_TF_RC_SUCCESS;
862 }
863
/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_hdr_bitmap *hdr_bit __rte_unused,
			 struct ulp_rte_hdr_field *hdr_field __rte_unused,
			 uint32_t *field_idx __rte_unused,
			 uint32_t *vlan_idx __rte_unused)
{
	/* Void items carry no match data, so there is nothing to parse. */
	return BNXT_TF_RC_SUCCESS;
}
874
/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_act_bitmap *act __rte_unused,
			 struct ulp_rte_act_prop *act_prop __rte_unused)
{
	/* Void actions carry no configuration, so there is nothing to parse. */
	return BNXT_TF_RC_SUCCESS;
}
883
/* Function to handle the parsing of RTE Flow action Mark Header. */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
			 struct ulp_rte_act_bitmap *act,
			 struct ulp_rte_act_prop *act_prop)
{
	const struct rte_flow_action_mark *mark;
	uint32_t mark_id = 0;

	mark = action_item->conf;
	if (mark) {
		/* Store the mark id in big endian in the action property. */
		mark_id = tfp_cpu_to_be_32(mark->id);
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
		       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

		/* Update the act_bitmap with mark */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
906
/* Function to handle the parsing of RTE Flow action RSS Header. */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
			struct ulp_rte_act_bitmap *act,
			struct ulp_rte_act_prop *act_prop __rte_unused)
{
	const struct rte_flow_action_rss *rss;

	rss = action_item->conf;
	if (rss) {
		/* Only the presence of RSS is recorded; the rss config
		 * fields themselves are not consumed here.
		 */
		/* Update the act_bitmap with rss */
		ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_RSS);
		return BNXT_TF_RC_SUCCESS;
	}
	BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
	return BNXT_TF_RC_ERROR;
}
924
925 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
926 int32_t
927 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
928                                 struct ulp_rte_act_bitmap *act,
929                                 struct ulp_rte_act_prop *ap)
930 {
931         const struct rte_flow_action_vxlan_encap *vxlan_encap;
932         const struct rte_flow_item *item;
933         const struct rte_flow_item_eth *eth_spec;
934         const struct rte_flow_item_ipv4 *ipv4_spec;
935         const struct rte_flow_item_ipv6 *ipv6_spec;
936         struct rte_flow_item_vxlan vxlan_spec;
937         uint32_t vlan_num = 0, vlan_size = 0;
938         uint32_t ip_size = 0, ip_type = 0;
939         uint32_t vxlan_size = 0;
940         uint8_t *buff;
941         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
942         const uint8_t   def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
943                                     0x00, 0x40, 0x11};
944
945         vxlan_encap = action_item->conf;
946         if (!vxlan_encap) {
947                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
948                 return BNXT_TF_RC_ERROR;
949         }
950
951         item = vxlan_encap->definition;
952         if (!item) {
953                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
954                 return BNXT_TF_RC_ERROR;
955         }
956
957         if (!ulp_rte_item_skip_void(&item, 0))
958                 return BNXT_TF_RC_ERROR;
959
960         /* must have ethernet header */
961         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
962                 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
963                 return BNXT_TF_RC_ERROR;
964         }
965         eth_spec = item->spec;
966         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
967         ulp_encap_buffer_copy(buff,
968                               eth_spec->dst.addr_bytes,
969                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
970
971         /* Goto the next item */
972         if (!ulp_rte_item_skip_void(&item, 1))
973                 return BNXT_TF_RC_ERROR;
974
975         /* May have vlan header */
976         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
977                 vlan_num++;
978                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
979                 ulp_encap_buffer_copy(buff,
980                                       item->spec,
981                                       sizeof(struct rte_flow_item_vlan));
982
983                 if (!ulp_rte_item_skip_void(&item, 1))
984                         return BNXT_TF_RC_ERROR;
985         }
986
987         /* may have two vlan headers */
988         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
989                 vlan_num++;
990                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
991                        sizeof(struct rte_flow_item_vlan)],
992                        item->spec,
993                        sizeof(struct rte_flow_item_vlan));
994                 if (!ulp_rte_item_skip_void(&item, 1))
995                         return BNXT_TF_RC_ERROR;
996         }
997         /* Update the vlan count and size of more than one */
998         if (vlan_num) {
999                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1000                 vlan_num = tfp_cpu_to_be_32(vlan_num);
1001                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1002                        &vlan_num,
1003                        sizeof(uint32_t));
1004                 vlan_size = tfp_cpu_to_be_32(vlan_size);
1005                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1006                        &vlan_size,
1007                        sizeof(uint32_t));
1008         }
1009
1010         /* L3 must be IPv4, IPv6 */
1011         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1012                 ipv4_spec = item->spec;
1013                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1014
1015                 /* copy the ipv4 details */
1016                 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1017                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1018                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1019                         ulp_encap_buffer_copy(buff,
1020                                               def_ipv4_hdr,
1021                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1022                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1023                 } else {
1024                         const uint8_t *tmp_buff;
1025
1026                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1027                         ulp_encap_buffer_copy(buff,
1028                                               &ipv4_spec->hdr.version_ihl,
1029                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1030                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1031                              BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1032                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1033                         ulp_encap_buffer_copy(buff,
1034                                               tmp_buff,
1035                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1036                 }
1037                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1038                     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1039                     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1040                 ulp_encap_buffer_copy(buff,
1041                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1042                                       BNXT_ULP_ENCAP_IPV4_DEST_IP);
1043
1044                 /* Update the ip size details */
1045                 ip_size = tfp_cpu_to_be_32(ip_size);
1046                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1047                        &ip_size, sizeof(uint32_t));
1048
1049                 /* update the ip type */
1050                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1051                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1052                        &ip_type, sizeof(uint32_t));
1053
1054                 if (!ulp_rte_item_skip_void(&item, 1))
1055                         return BNXT_TF_RC_ERROR;
1056         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1057                 ipv6_spec = item->spec;
1058                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1059
1060                 /* copy the ipv4 details */
1061                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1062                        ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1063
1064                 /* Update the ip size details */
1065                 ip_size = tfp_cpu_to_be_32(ip_size);
1066                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1067                        &ip_size, sizeof(uint32_t));
1068
1069                  /* update the ip type */
1070                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1071                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1072                        &ip_type, sizeof(uint32_t));
1073
1074                 if (!ulp_rte_item_skip_void(&item, 1))
1075                         return BNXT_TF_RC_ERROR;
1076         } else {
1077                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1078                 return BNXT_TF_RC_ERROR;
1079         }
1080
1081         /* L4 is UDP */
1082         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1083                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1084                 return BNXT_TF_RC_ERROR;
1085         }
1086         /* copy the udp details */
1087         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1088                               item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1089
1090         if (!ulp_rte_item_skip_void(&item, 1))
1091                 return BNXT_TF_RC_ERROR;
1092
1093         /* Finally VXLAN */
1094         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1095                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1096                 return BNXT_TF_RC_ERROR;
1097         }
1098         vxlan_size = sizeof(struct rte_flow_item_vxlan);
1099         /* copy the vxlan details */
1100         memcpy(&vxlan_spec, item->spec, vxlan_size);
1101         vxlan_spec.flags = 0x08;
1102         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1103                               (const uint8_t *)&vxlan_spec,
1104                               vxlan_size);
1105         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1106         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1107                &vxlan_size, sizeof(uint32_t));
1108
1109         /*update the hdr_bitmap with vxlan */
1110         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1111         return BNXT_TF_RC_SUCCESS;
1112 }
1113
/* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_act_bitmap *act,
				struct ulp_rte_act_prop *act_prop __rte_unused)
{
	/* update the act_bitmap with vxlan decap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
	return BNXT_TF_RC_SUCCESS;
}
1125
/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_act_bitmap *act,
			 struct ulp_rte_act_prop *act_prop __rte_unused)
{
	/* Update the act_bitmap with drop */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}
1136
1137 /* Function to handle the parsing of RTE Flow action count. */
1138 int32_t
1139 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1140                           struct ulp_rte_act_bitmap *act,
1141                           struct ulp_rte_act_prop *act_prop __rte_unused)
1142
1143 {
1144         const struct rte_flow_action_count *act_count;
1145
1146         act_count = action_item->conf;
1147         if (act_count) {
1148                 if (act_count->shared) {
1149                         BNXT_TF_DBG(ERR,
1150                                     "Parse Error:Shared count not supported\n");
1151                         return BNXT_TF_RC_PARSE_ERR;
1152                 }
1153                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1154                        &act_count->id,
1155                        BNXT_ULP_ACT_PROP_SZ_COUNT);
1156         }
1157
1158         /* Update the hdr_bitmap with count */
1159         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_COUNT);
1160         return BNXT_TF_RC_SUCCESS;
1161 }
1162
1163 /* Function to handle the parsing of RTE Flow action PF. */
1164 int32_t
1165 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1166                        struct ulp_rte_act_bitmap *act,
1167                        struct ulp_rte_act_prop *act_prop)
1168 {
1169         uint8_t *svif_buf;
1170         uint8_t *vnic_buffer;
1171         uint32_t svif;
1172
1173         /* Update the hdr_bitmap with vnic bit */
1174         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
1175
1176         /* copy the PF of the current device into VNIC Property */
1177         svif_buf = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
1178         ulp_util_field_int_read(svif_buf, &svif);
1179         svif = (uint32_t)bnxt_get_vnic_id(svif);
1180         svif = htonl(svif);
1181         vnic_buffer = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
1182         ulp_util_field_int_write(vnic_buffer, svif);
1183
1184         return BNXT_TF_RC_SUCCESS;
1185 }
1186
/* Function to handle the parsing of RTE Flow action VF. */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
		       struct ulp_rte_act_bitmap *act,
		       struct ulp_rte_act_prop *act_prop)
{
	const struct rte_flow_action_vf *vf_action;

	vf_action = action_item->conf;
	if (vf_action) {
		/* The "original" flag asks to target the original VF; that
		 * lookup is not implemented here.
		 */
		if (vf_action->original) {
			BNXT_TF_DBG(ERR,
				    "Parse Error:VF Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		/* TBD: Update the computed VNIC using VF conversion */
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &vf_action->id,
		       BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the act_bitmap with vnic */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
	return BNXT_TF_RC_SUCCESS;
}
1212
/* Function to handle the parsing of RTE Flow action port_id. */
int32_t
ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
			    struct ulp_rte_act_bitmap *act,
			    struct ulp_rte_act_prop *act_prop)
{
	const struct rte_flow_action_port_id *port_id;

	port_id = act_item->conf;
	if (port_id) {
		/* The "original" flag asks to target the original port; that
		 * lookup is not implemented here.
		 */
		if (port_id->original) {
			BNXT_TF_DBG(ERR,
				    "ParseErr:Portid Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		/* TBD: Update the computed VNIC using port conversion */
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
		       &port_id->id,
		       BNXT_ULP_ACT_PROP_SZ_VNIC);
	}

	/* Update the act_bitmap with vnic */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
	return BNXT_TF_RC_SUCCESS;
}
1238
/* Function to handle the parsing of RTE Flow action phy_port. */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
			     struct ulp_rte_act_bitmap *act,
			     struct ulp_rte_act_prop *act_prop)
{
	const struct rte_flow_action_phy_port *phy_port;

	phy_port = action_item->conf;
	if (phy_port) {
		/* The "original" flag asks to target the original port; that
		 * lookup is not implemented here.
		 */
		if (phy_port->original) {
			BNXT_TF_DBG(ERR,
				    "Parse Err:Port Original not supported\n");
			return BNXT_TF_RC_PARSE_ERR;
		}
		/* Store the physical port index in the vport property. */
		memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
		       &phy_port->index,
		       BNXT_ULP_ACT_PROP_SZ_VPORT);
	}

	/* Update the act_bitmap with vport */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VPORT);
	return BNXT_TF_RC_SUCCESS;
}