net/bnxt: support GRE flows
[dpdk.git] drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
14 #include "tfp.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
18 #include "ulp_tun.h"
19
20 /* Local defines for the parsing functions */
21 #define ULP_VLAN_PRIORITY_SHIFT         13 /* First 3 bits */
22 #define ULP_VLAN_PRIORITY_MASK          0x700
23 #define ULP_VLAN_TAG_MASK               0xFFF /* Last 12 bits */
24 #define ULP_UDP_PORT_VXLAN              4789
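/*
 * Note: the VLAN defines above follow the 802.1Q TCI layout (3-bit PCP in
 * the top bits, 1-bit DEI, 12-bit VID in the low 12 bits); 4789 is the
 * IANA-assigned VXLAN UDP destination port.
 */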
25
26 /* Utility function to skip the void items. */
27 static inline int32_t
28 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 {
30         if (!*item)
31                 return 0;
32         if (increment)
33                 (*item)++;
34         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
35                 (*item)++;
36         if (*item)
37                 return 1;
38         return 0;
39 }
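/*
 * Behavioural note: the helper above returns 0 only when the item pointer
 * is NULL; otherwise it optionally steps past the current item (increment)
 * and then advances over any VOID items before returning 1.
 */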
40
41 /* Utility function to update the field_bitmap */
42 static void
43 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
44                                    uint32_t idx)
45 {
46         struct ulp_rte_hdr_field *field;
47
48         field = &params->hdr_field[idx];
49         if (ulp_bitmap_notzero(field->mask, field->size)) {
50                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
51                 /* Not exact match */
52                 if (!ulp_bitmap_is_ones(field->mask, field->size))
53                         ULP_COMP_FLD_IDX_WR(params,
54                                             BNXT_ULP_CF_IDX_WC_MATCH, 1);
55         } else {
56                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
57         }
58 }
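/*
 * Note on the helper above: a non-zero mask marks the field as used in
 * fld_bitmap; a mask that is not all ones additionally flags the flow as
 * a wildcard match via BNXT_ULP_CF_IDX_WC_MATCH.
 */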
59
60 /* Utility function to copy field spec items */
61 static struct ulp_rte_hdr_field *
62 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
63                         const void *buffer,
64                         uint32_t size)
65 {
66         field->size = size;
67         memcpy(field->spec, buffer, field->size);
68         field++;
69         return field;
70 }
71
72 /* Utility function to copy field masks items */
73 static void
74 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
75                        uint32_t *idx,
76                        const void *buffer,
77                        uint32_t size)
78 {
79         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
80
81         memcpy(field->mask, buffer, size);
82         ulp_rte_parser_field_bitmap_update(params, *idx);
83         *idx = *idx + 1;
84 }
85
86 /* Utility function to ignore field masks items */
87 static void
88 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
89                          uint32_t *idx,
90                          const void *buffer __rte_unused,
91                          uint32_t size __rte_unused)
92 {
93         *idx = *idx + 1;
94 }
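/*
 * The ignore helper above only advances the field index; it keeps the spec
 * and mask indices in lockstep when a particular mask (e.g. VLAN priority
 * or IPv4 ToS below) is deliberately not matched on.
 */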
95
96 /*
97  * Function to handle the parsing of RTE Flows and placing
98  * the RTE flow items into the ulp structures.
99  */
100 int32_t
101 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
102                               struct ulp_rte_parser_params *params)
103 {
104         const struct rte_flow_item *item = pattern;
105         struct bnxt_ulp_rte_hdr_info *hdr_info;
106
107         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
108
109         /* Set the computed flags for no vlan tags before parsing */
110         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
111         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
112
113         /* Parse all the items in the pattern */
114         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
115                 /* get the header information from the flow_hdr_info table */
116                 hdr_info = &ulp_hdr_info[item->type];
117                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
118                         BNXT_TF_DBG(ERR,
119                                     "Truflow parser does not support type %d\n",
120                                     item->type);
121                         return BNXT_TF_RC_PARSE_ERR;
122                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
123                         /* call the registered callback handler */
124                         if (hdr_info->proto_hdr_func) {
125                                 if (hdr_info->proto_hdr_func(item, params) !=
126                                     BNXT_TF_RC_SUCCESS) {
127                                         return BNXT_TF_RC_ERROR;
128                                 }
129                         }
130                 }
131                 item++;
132         }
133         /* update the implied SVIF */
134         return ulp_rte_parser_implicit_match_port_process(params);
135 }
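/*
 * Usage sketch (assumption, based on the flow create path): the header
 * parser above is typically invoked before the action parser, e.g.
 *     if (bnxt_ulp_rte_parser_hdr_parse(pattern, &params) == BNXT_TF_RC_SUCCESS &&
 *         bnxt_ulp_rte_parser_act_parse(actions, &params) == BNXT_TF_RC_SUCCESS)
 *             bnxt_ulp_rte_parser_post_process(&params);
 */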
136
137 /*
138  * Function to handle the parsing of RTE Flows and placing
139  * the RTE flow actions into the ulp structures.
140  */
141 int32_t
142 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
143                               struct ulp_rte_parser_params *params)
144 {
145         const struct rte_flow_action *action_item = actions;
146         struct bnxt_ulp_rte_act_info *hdr_info;
147
148         /* Parse all the actions in the action list */
149         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
150                 /* get the action information from the ulp_act_info table */
151                 hdr_info = &ulp_act_info[action_item->type];
152                 if (hdr_info->act_type ==
153                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
154                         BNXT_TF_DBG(ERR,
155                                     "Truflow parser does not support act %u\n",
156                                     action_item->type);
157                         return BNXT_TF_RC_ERROR;
158                 } else if (hdr_info->act_type ==
159                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
160                         /* call the registered callback handler */
161                         if (hdr_info->proto_act_func) {
162                                 if (hdr_info->proto_act_func(action_item,
163                                                              params) !=
164                                     BNXT_TF_RC_SUCCESS) {
165                                         return BNXT_TF_RC_ERROR;
166                                 }
167                         }
168                 }
169                 action_item++;
170         }
171         /* update the implied port details */
172         ulp_rte_parser_implicit_act_port_process(params);
173         return BNXT_TF_RC_SUCCESS;
174 }
175
176 /*
177  * Function to handle the post processing of the computed
178  * fields for the interface.
179  */
180 static void
181 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
182 {
183         uint32_t ifindex;
184         uint16_t port_id, parif;
185         uint32_t mtype;
186         enum bnxt_ulp_direction_type dir;
187
188         /* get the direction details */
189         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
190
191         /* read the port id details */
192         port_id = ULP_COMP_FLD_IDX_RD(params,
193                                       BNXT_ULP_CF_IDX_INCOMING_IF);
194         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
195                                               port_id,
196                                               &ifindex)) {
197                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
198                 return;
199         }
200
201         if (dir == BNXT_ULP_DIR_INGRESS) {
202                 /* Set port PARIF */
203                 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
204                                           BNXT_ULP_PHY_PORT_PARIF, &parif)) {
205                         BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
206                         return;
207                 }
208                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
209                                     parif);
210         } else {
211                 /* Get the match port type */
212                 mtype = ULP_COMP_FLD_IDX_RD(params,
213                                             BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
214                 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
215                         ULP_COMP_FLD_IDX_WR(params,
216                                             BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
217                                             1);
218                         /* Set VF func PARIF */
219                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
220                                                   BNXT_ULP_VF_FUNC_PARIF,
221                                                   &parif)) {
222                                 BNXT_TF_DBG(ERR,
223                                             "ParseErr:ifindex is not valid\n");
224                                 return;
225                         }
226                         ULP_COMP_FLD_IDX_WR(params,
227                                             BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
228                                             parif);
229
230                 } else {
231                         /* Set DRV func PARIF */
232                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
233                                                   BNXT_ULP_DRV_FUNC_PARIF,
234                                                   &parif)) {
235                                 BNXT_TF_DBG(ERR,
236                                             "ParseErr:ifindex is not valid\n");
237                                 return;
238                         }
239                         ULP_COMP_FLD_IDX_WR(params,
240                                             BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
241                                             parif);
242                 }
243         }
244 }
245
246 static int32_t
247 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
248 {
249         enum bnxt_ulp_intf_type match_port_type, act_port_type;
250         enum bnxt_ulp_direction_type dir;
251         uint32_t act_port_set;
252
253         /* Get the computed details */
254         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
255         match_port_type = ULP_COMP_FLD_IDX_RD(params,
256                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
257         act_port_type = ULP_COMP_FLD_IDX_RD(params,
258                                             BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
259         act_port_set = ULP_COMP_FLD_IDX_RD(params,
260                                            BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
261
262         /* set the flow direction in the proto and action header */
263         if (dir == BNXT_ULP_DIR_EGRESS) {
264                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
265                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
266                 ULP_BITMAP_SET(params->act_bitmap.bits,
267                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
268         }
269
270         /* calculate the VF to VF flag */
271         if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
272             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
273                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
274
275         /* Update the decrement ttl computational fields */
276         if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
277                              BNXT_ULP_ACT_BIT_DEC_TTL)) {
278                 /*
279                  * If the vxlan tunnel header is present and the vxlan decap
280                  * action is not set, then decrement the tunnel ttl.
281                  * Similarly add GRE and NVGRE in the future.
282                  */
283                 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
284                                       BNXT_ULP_HDR_BIT_T_VXLAN) &&
285                     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
286                                       BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
287                         ULP_COMP_FLD_IDX_WR(params,
288                                             BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
289                 } else {
290                         ULP_COMP_FLD_IDX_WR(params,
291                                             BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
292                 }
293         }
294
295         /* Merge the hdr_fp_bit into the proto header bit */
296         params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
297
298         /* Update the comp fld fid */
299         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
300
301         /* Update the computed interface parameters */
302         bnxt_ulp_comp_fld_intf_update(params);
303
304         /* TBD: Handle the flow rejection scenarios */
305         return 0;
306 }
307
308 /*
309  * Function to handle the post processing of the parsing details
310  */
311 int32_t
312 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
313 {
314         ulp_post_process_normal_flow(params);
315         return ulp_post_process_tun_flow(params);
316 }
317
318 /*
319  * Function to compute the flow direction based on the match port details
320  */
321 static void
322 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
323 {
324         enum bnxt_ulp_intf_type match_port_type;
325
326         /* Get the match port type */
327         match_port_type = ULP_COMP_FLD_IDX_RD(params,
328                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
329
330         /* If ingress flow and match port is vf rep then dir is egress */
331         if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
332             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
333                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
334                                     BNXT_ULP_DIR_EGRESS);
335         } else {
336                 /* Assign the input direction */
337                 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
338                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
339                                             BNXT_ULP_DIR_INGRESS);
340                 else
341                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
342                                             BNXT_ULP_DIR_EGRESS);
343         }
344 }
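/*
 * Example: a flow created with the ingress attribute whose match port is a
 * VF representor is computed as an egress flow above.
 */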
345
346 /* Function to set the SVIF match criteria in the parser params. */
347 static int32_t
348 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
349                         uint32_t ifindex,
350                         uint16_t mask)
351 {
352         uint16_t svif;
353         enum bnxt_ulp_direction_type dir;
354         struct ulp_rte_hdr_field *hdr_field;
355         enum bnxt_ulp_svif_type svif_type;
356         enum bnxt_ulp_intf_type port_type;
357
358         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
359             BNXT_ULP_INVALID_SVIF_VAL) {
360                 BNXT_TF_DBG(ERR,
361                             "SVIF already set, multiple sources not supported\n");
362                 return BNXT_TF_RC_ERROR;
363         }
364
365         /* Get port type details */
366         port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
367         if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
368                 BNXT_TF_DBG(ERR, "Invalid port type\n");
369                 return BNXT_TF_RC_ERROR;
370         }
371
372         /* Update the match port type */
373         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
374
375         /* compute the direction */
376         bnxt_ulp_rte_parser_direction_compute(params);
377
378         /* Get the computed direction */
379         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
380         if (dir == BNXT_ULP_DIR_INGRESS) {
381                 svif_type = BNXT_ULP_PHY_PORT_SVIF;
382         } else {
383                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
384                         svif_type = BNXT_ULP_VF_FUNC_SVIF;
385                 else
386                         svif_type = BNXT_ULP_DRV_FUNC_SVIF;
387         }
388         ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
389                              &svif);
390         svif = rte_cpu_to_be_16(svif);
391         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
392         memcpy(hdr_field->spec, &svif, sizeof(svif));
393         memcpy(hdr_field->mask, &mask, sizeof(mask));
394         hdr_field->size = sizeof(svif);
395         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
396                             rte_be_to_cpu_16(svif));
397         return BNXT_TF_RC_SUCCESS;
398 }
399
400 /* Function to handle the parsing of the RTE port id */
401 int32_t
402 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
403 {
404         uint16_t port_id = 0;
405         uint16_t svif_mask = 0xFFFF;
406         uint32_t ifindex;
407         int32_t rc = BNXT_TF_RC_ERROR;
408
409         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
410             BNXT_ULP_INVALID_SVIF_VAL)
411                 return BNXT_TF_RC_SUCCESS;
412
413         /* SVIF not set. So get the port id */
414         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
415
416         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
417                                               port_id,
418                                               &ifindex)) {
419                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
420                 return rc;
421         }
422
423         /* Update the SVIF details */
424         rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
425         return rc;
426 }
427
428 /* Function to handle the implicit action port id */
429 int32_t
430 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
431 {
432         struct rte_flow_action action_item = {0};
433         struct rte_flow_action_port_id port_id = {0};
434
435         /* Read the action port set bit */
436         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
437                 /* Already set, so just exit */
438                 return BNXT_TF_RC_SUCCESS;
439         }
440         port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
441         action_item.conf = &port_id;
442
443         /* Update the action port based on incoming port */
444         ulp_rte_port_id_act_handler(&action_item, params);
445
446         /* Reset the action port set bit */
447         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
448         return BNXT_TF_RC_SUCCESS;
449 }
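/*
 * Note (assumption): the ACT_PORT_IS_SET bit is cleared again above so that
 * later processing can tell the destination port was implied from the
 * incoming port rather than explicitly specified by the application.
 */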
450
451 /* Function to handle the parsing of RTE Flow item PF Header. */
452 int32_t
453 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
454                        struct ulp_rte_parser_params *params)
455 {
456         uint16_t port_id = 0;
457         uint16_t svif_mask = 0xFFFF;
458         uint32_t ifindex;
459
460         /* Get the implicit port id */
461         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
462
463         /* perform the conversion from dpdk port to bnxt ifindex */
464         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
465                                               port_id,
466                                               &ifindex)) {
467                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
468                 return BNXT_TF_RC_ERROR;
469         }
470
471         /* Update the SVIF details */
472         return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
473 }
474
475 /* Function to handle the parsing of RTE Flow item VF Header. */
476 int32_t
477 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
478                        struct ulp_rte_parser_params *params)
479 {
480         const struct rte_flow_item_vf *vf_spec = item->spec;
481         const struct rte_flow_item_vf *vf_mask = item->mask;
482         uint16_t mask = 0;
483         uint32_t ifindex;
484         int32_t rc = BNXT_TF_RC_PARSE_ERR;
485
486         /* Get VF rte_flow_item for Port details */
487         if (!vf_spec) {
488                 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
489                 return rc;
490         }
491         if (!vf_mask) {
492                 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
493                 return rc;
494         }
495         mask = vf_mask->id;
496
497         /* perform the conversion from VF Func id to bnxt ifindex */
498         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
499                                                  vf_spec->id,
500                                                  &ifindex)) {
501                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
502                 return rc;
503         }
504         /* Update the SVIF details */
505         return ulp_rte_parser_svif_set(params, ifindex, mask);
506 }
507
508 /* Function to handle the parsing of RTE Flow item port id Header. */
509 int32_t
510 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
511                             struct ulp_rte_parser_params *params)
512 {
513         const struct rte_flow_item_port_id *port_spec = item->spec;
514         const struct rte_flow_item_port_id *port_mask = item->mask;
515         uint16_t mask = 0;
516         int32_t rc = BNXT_TF_RC_PARSE_ERR;
517         uint32_t ifindex;
518
519         if (!port_spec) {
520                 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
521                 return rc;
522         }
523         if (!port_mask) {
524                 BNXT_TF_DBG(ERR, "ParseErr:Port id mask is not valid\n");
525                 return rc;
526         }
527         mask = port_mask->id;
528
529         /* perform the conversion from dpdk port to bnxt ifindex */
530         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
531                                               port_spec->id,
532                                               &ifindex)) {
533                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
534                 return rc;
535         }
536         /* Update the SVIF details */
537         return ulp_rte_parser_svif_set(params, ifindex, mask);
538 }
539
540 /* Function to handle the parsing of RTE Flow item phy port Header. */
541 int32_t
542 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
543                              struct ulp_rte_parser_params *params)
544 {
545         const struct rte_flow_item_phy_port *port_spec = item->spec;
546         const struct rte_flow_item_phy_port *port_mask = item->mask;
547         uint16_t mask = 0;
548         int32_t rc = BNXT_TF_RC_ERROR;
549         uint16_t svif;
550         enum bnxt_ulp_direction_type dir;
551         struct ulp_rte_hdr_field *hdr_field;
552
553         /* Copy the rte_flow_item for phy port into hdr_field */
554         if (!port_spec) {
555                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
556                 return rc;
557         }
558         if (!port_mask) {
559                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
560                 return rc;
561         }
562         mask = port_mask->index;
563
564         /* Update the match port type */
565         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
566                             BNXT_ULP_INTF_TYPE_PHY_PORT);
567
568         /* Compute the Hw direction */
569         bnxt_ulp_rte_parser_direction_compute(params);
570
571         /* Direction validation */
572         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
573         if (dir == BNXT_ULP_DIR_EGRESS) {
574                 BNXT_TF_DBG(ERR,
575                             "Parse Err:Phy ports are valid only for ingress\n");
576                 return BNXT_TF_RC_PARSE_ERR;
577         }
578
579         /* Get the physical port details from port db */
580         rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
581                                            &svif);
582         if (rc) {
583                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
584                 return BNXT_TF_RC_PARSE_ERR;
585         }
586
587         /* Update the SVIF details */
588         svif = rte_cpu_to_be_16(svif);
589         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
590         memcpy(hdr_field->spec, &svif, sizeof(svif));
591         memcpy(hdr_field->mask, &mask, sizeof(mask));
592         hdr_field->size = sizeof(svif);
593         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
594                             rte_be_to_cpu_16(svif));
595         return BNXT_TF_RC_SUCCESS;
596 }
597
598 /* Function to handle the update of proto header based on field values */
599 static void
600 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
601                              uint16_t type, uint32_t in_flag)
602 {
603         if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
604                 if (in_flag) {
605                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
606                                        BNXT_ULP_HDR_BIT_I_IPV4);
607                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
608                 } else {
609                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
610                                        BNXT_ULP_HDR_BIT_O_IPV4);
611                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
612                 }
613         } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
614                 if (in_flag) {
615                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
616                                        BNXT_ULP_HDR_BIT_I_IPV6);
617                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
618                 } else {
619                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
620                                        BNXT_ULP_HDR_BIT_O_IPV6);
621                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
622                 }
623         }
624 }
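/*
 * Only the IPv4 and IPv6 ethertypes update the L3 header bitmaps above;
 * any other ethertype leaves the bitmaps and computed fields untouched.
 */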
625
626 /* Internal Function to identify broadcast or multicast packets */
627 static int32_t
628 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
629 {
630         if (rte_is_multicast_ether_addr(eth_addr) ||
631             rte_is_broadcast_ether_addr(eth_addr)) {
632                 BNXT_TF_DBG(DEBUG,
633                             "No support for bcast or mcast addr offload\n");
634                 return 1;
635         }
636         return 0;
637 }
638
639 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
640 int32_t
641 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
642                         struct ulp_rte_parser_params *params)
643 {
644         const struct rte_flow_item_eth *eth_spec = item->spec;
645         const struct rte_flow_item_eth *eth_mask = item->mask;
646         struct ulp_rte_hdr_field *field;
647         uint32_t idx = params->field_idx;
648         uint32_t size;
649         uint16_t eth_type = 0;
650         uint32_t inner_flag = 0;
651
652         /*
653          * Copy the rte_flow_item for eth into hdr_field using ethernet
654          * header fields
655          */
656         if (eth_spec) {
657                 size = sizeof(eth_spec->dst.addr_bytes);
658                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
659                                                 eth_spec->dst.addr_bytes,
660                                                 size);
661                 /* Todo: workaround to avoid multicast and broadcast addr */
662                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
663                         return BNXT_TF_RC_PARSE_ERR;
664
665                 size = sizeof(eth_spec->src.addr_bytes);
666                 field = ulp_rte_parser_fld_copy(field,
667                                                 eth_spec->src.addr_bytes,
668                                                 size);
669                 /* Todo: workaround to avoid multicast and broadcast addr */
670                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
671                         return BNXT_TF_RC_PARSE_ERR;
672
673                 field = ulp_rte_parser_fld_copy(field,
674                                                 &eth_spec->type,
675                                                 sizeof(eth_spec->type));
676                 eth_type = eth_spec->type;
677         }
678         if (eth_mask) {
679                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
680                                        sizeof(eth_mask->dst.addr_bytes));
681                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
682                                        sizeof(eth_mask->src.addr_bytes));
683                 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
684                                        sizeof(eth_mask->type));
685         }
686         /* Add number of Eth header elements */
687         params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
688
689         /* Update the protocol hdr bitmap */
690         if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
691                              BNXT_ULP_HDR_BIT_O_ETH) ||
692             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
693                              BNXT_ULP_HDR_BIT_O_IPV4) ||
694             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
695                              BNXT_ULP_HDR_BIT_O_IPV6) ||
696             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697                              BNXT_ULP_HDR_BIT_O_UDP) ||
698             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699                              BNXT_ULP_HDR_BIT_O_TCP)) {
700                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
701                 inner_flag = 1;
702         } else {
703                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
704         }
705         /* Update the field protocol hdr bitmap */
706         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
707
708         return BNXT_TF_RC_SUCCESS;
709 }
710
711 /* Function to handle the parsing of RTE Flow item Vlan Header. */
712 int32_t
713 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
714                          struct ulp_rte_parser_params *params)
715 {
716         const struct rte_flow_item_vlan *vlan_spec = item->spec;
717         const struct rte_flow_item_vlan *vlan_mask = item->mask;
718         struct ulp_rte_hdr_field *field;
719         struct ulp_rte_hdr_bitmap       *hdr_bit;
720         uint32_t idx = params->field_idx;
721         uint16_t vlan_tag, priority;
722         uint32_t outer_vtag_num;
723         uint32_t inner_vtag_num;
724         uint16_t eth_type = 0;
725         uint32_t inner_flag = 0;
726
727         /*
728          * Copy the rte_flow_item for vlan into hdr_field using Vlan
729          * header fields
730          */
731         if (vlan_spec) {
732                 vlan_tag = ntohs(vlan_spec->tci);
733                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
734                 vlan_tag &= ULP_VLAN_TAG_MASK;
735                 vlan_tag = htons(vlan_tag);
736
737                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
738                                                 &priority,
739                                                 sizeof(priority));
740                 field = ulp_rte_parser_fld_copy(field,
741                                                 &vlan_tag,
742                                                 sizeof(vlan_tag));
743                 field = ulp_rte_parser_fld_copy(field,
744                                                 &vlan_spec->inner_type,
745                                                 sizeof(vlan_spec->inner_type));
746                 eth_type = vlan_spec->inner_type;
747         }
748
749         if (vlan_mask) {
750                 vlan_tag = ntohs(vlan_mask->tci);
751                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
752                 vlan_tag &= 0xfff;
753
754                 /*
755                  * The storage for the priority and vlan tag is 2 bytes.
756                  * If the 3-bit priority mask is all 1's, then set the
757                  * remaining 13 bits to 1's as well so that the field is
758                  * matched as an exact match.
759                  */
760                 if (priority == ULP_VLAN_PRIORITY_MASK)
761                         priority |= ~ULP_VLAN_PRIORITY_MASK;
762                 if (vlan_tag == ULP_VLAN_TAG_MASK)
763                         vlan_tag |= ~ULP_VLAN_TAG_MASK;
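                /* e.g. a fully specified 12-bit VID mask of 0xfff was widened
                 * to 0xffff above so the 2-byte field is an exact match.
                 */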
764                 vlan_tag = htons(vlan_tag);
765
766                 /*
767                  * The priority field is ignored since OVS sets it as a
768                  * wildcard match and that is not supported. This is a
769                  * workaround and shall be addressed in the future.
770                  */
771                 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
772                                          sizeof(priority));
773
774                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
775                                        sizeof(vlan_tag));
776                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
777                                        sizeof(vlan_mask->inner_type));
778         }
779         /* Set the field index to new incremented value */
780         params->field_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
781
782         /* Get the outer tag and inner tag counts */
783         outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
784                                              BNXT_ULP_CF_IDX_O_VTAG_NUM);
785         inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
786                                              BNXT_ULP_CF_IDX_I_VTAG_NUM);
787
788         /* Update the hdr_bitmap of the vlans */
789         hdr_bit = &params->hdr_bitmap;
790         if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
791             !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
792             !outer_vtag_num) {
793                 /* Update the vlan tag num */
794                 outer_vtag_num++;
795                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
796                                     outer_vtag_num);
797                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
798                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
799                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
800                                BNXT_ULP_HDR_BIT_OO_VLAN);
801         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
802                    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
803                    outer_vtag_num == 1) {
804                 /* update the vlan tag num */
805                 outer_vtag_num++;
806                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
807                                     outer_vtag_num);
808                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
809                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
810                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
811                                BNXT_ULP_HDR_BIT_OI_VLAN);
812         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
813                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
814                    !inner_vtag_num) {
815                 /* update the vlan tag num */
816                 inner_vtag_num++;
817                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
818                                     inner_vtag_num);
819                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
820                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
821                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
822                                BNXT_ULP_HDR_BIT_IO_VLAN);
823                 inner_flag = 1;
824         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
825                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
826                    inner_vtag_num == 1) {
827                 /* update the vlan tag num */
828                 inner_vtag_num++;
829                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
830                                     inner_vtag_num);
831                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
832                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
833                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
834                                BNXT_ULP_HDR_BIT_II_VLAN);
835                 inner_flag = 1;
836         } else {
837                 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
838                 return BNXT_TF_RC_ERROR;
839         }
840         /* Update the field protocol hdr bitmap */
841         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
842         return BNXT_TF_RC_SUCCESS;
843 }
844
845 /* Function to handle the update of proto header based on field values */
846 static void
847 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
848                              uint8_t proto, uint32_t in_flag)
849 {
850         if (proto == IPPROTO_UDP) {
851                 if (in_flag) {
852                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
853                                        BNXT_ULP_HDR_BIT_I_UDP);
854                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
855                 } else {
856                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
857                                        BNXT_ULP_HDR_BIT_O_UDP);
858                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
859                 }
860         } else if (proto == IPPROTO_TCP) {
861                 if (in_flag) {
862                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
863                                        BNXT_ULP_HDR_BIT_I_TCP);
864                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
865                 } else {
866                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
867                                        BNXT_ULP_HDR_BIT_O_TCP);
868                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
869                 }
870         }
871 }
872
873 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
874 int32_t
875 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
876                          struct ulp_rte_parser_params *params)
877 {
878         const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
879         const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
880         struct ulp_rte_hdr_field *field;
881         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
882         uint32_t idx = params->field_idx;
883         uint32_t size;
884         uint8_t proto = 0;
885         uint32_t inner_flag = 0;
886         uint32_t cnt;
887
888         /* validate there is no 3rd L3 header */
889         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
890         if (cnt == 2) {
891                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
892                 return BNXT_TF_RC_ERROR;
893         }
894
895         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
896                               BNXT_ULP_HDR_BIT_O_ETH) &&
897             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
898                               BNXT_ULP_HDR_BIT_I_ETH)) {
899                 /* An F2 flow does not include an eth item. When the parser
900                  * detects an IPv4/IPv6 item that belongs to the outer header
901                  * (i.e., o_ipv4/o_ipv6) and neither O_ETH nor I_ETH is set,
902                  * add the offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
903                  * This allows the parser post processor to update the
904                  * t_dmac in hdr_field[o_eth.dmac].
905                  */
906                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
907                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
908                 params->field_idx = idx;
909         }
910
911         /*
912          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
913          * header fields
914          */
915         if (ipv4_spec) {
916                 size = sizeof(ipv4_spec->hdr.version_ihl);
917                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
918                                                 &ipv4_spec->hdr.version_ihl,
919                                                 size);
920                 size = sizeof(ipv4_spec->hdr.type_of_service);
921                 field = ulp_rte_parser_fld_copy(field,
922                                                 &ipv4_spec->hdr.type_of_service,
923                                                 size);
924                 size = sizeof(ipv4_spec->hdr.total_length);
925                 field = ulp_rte_parser_fld_copy(field,
926                                                 &ipv4_spec->hdr.total_length,
927                                                 size);
928                 size = sizeof(ipv4_spec->hdr.packet_id);
929                 field = ulp_rte_parser_fld_copy(field,
930                                                 &ipv4_spec->hdr.packet_id,
931                                                 size);
932                 size = sizeof(ipv4_spec->hdr.fragment_offset);
933                 field = ulp_rte_parser_fld_copy(field,
934                                                 &ipv4_spec->hdr.fragment_offset,
935                                                 size);
936                 size = sizeof(ipv4_spec->hdr.time_to_live);
937                 field = ulp_rte_parser_fld_copy(field,
938                                                 &ipv4_spec->hdr.time_to_live,
939                                                 size);
940                 size = sizeof(ipv4_spec->hdr.next_proto_id);
941                 field = ulp_rte_parser_fld_copy(field,
942                                                 &ipv4_spec->hdr.next_proto_id,
943                                                 size);
944                 proto = ipv4_spec->hdr.next_proto_id;
945                 size = sizeof(ipv4_spec->hdr.hdr_checksum);
946                 field = ulp_rte_parser_fld_copy(field,
947                                                 &ipv4_spec->hdr.hdr_checksum,
948                                                 size);
949                 size = sizeof(ipv4_spec->hdr.src_addr);
950                 field = ulp_rte_parser_fld_copy(field,
951                                                 &ipv4_spec->hdr.src_addr,
952                                                 size);
953                 size = sizeof(ipv4_spec->hdr.dst_addr);
954                 field = ulp_rte_parser_fld_copy(field,
955                                                 &ipv4_spec->hdr.dst_addr,
956                                                 size);
957         }
958         if (ipv4_mask) {
959                 ulp_rte_prsr_mask_copy(params, &idx,
960                                        &ipv4_mask->hdr.version_ihl,
961                                        sizeof(ipv4_mask->hdr.version_ihl));
962                 /*
963                  * The tos field is ignored since OVS sets it as a wildcard
964                  * match and that is not supported. This is a workaround and
965                  * shall be addressed in the future.
966                  */
967                 ulp_rte_prsr_mask_ignore(params, &idx,
968                                          &ipv4_mask->hdr.type_of_service,
969                                          sizeof(ipv4_mask->hdr.type_of_service)
970                                          );
971
972                 ulp_rte_prsr_mask_copy(params, &idx,
973                                        &ipv4_mask->hdr.total_length,
974                                        sizeof(ipv4_mask->hdr.total_length));
975                 ulp_rte_prsr_mask_copy(params, &idx,
976                                        &ipv4_mask->hdr.packet_id,
977                                        sizeof(ipv4_mask->hdr.packet_id));
978                 ulp_rte_prsr_mask_copy(params, &idx,
979                                        &ipv4_mask->hdr.fragment_offset,
980                                        sizeof(ipv4_mask->hdr.fragment_offset));
981                 ulp_rte_prsr_mask_copy(params, &idx,
982                                        &ipv4_mask->hdr.time_to_live,
983                                        sizeof(ipv4_mask->hdr.time_to_live));
984                 ulp_rte_prsr_mask_copy(params, &idx,
985                                        &ipv4_mask->hdr.next_proto_id,
986                                        sizeof(ipv4_mask->hdr.next_proto_id));
987                 ulp_rte_prsr_mask_copy(params, &idx,
988                                        &ipv4_mask->hdr.hdr_checksum,
989                                        sizeof(ipv4_mask->hdr.hdr_checksum));
990                 ulp_rte_prsr_mask_copy(params, &idx,
991                                        &ipv4_mask->hdr.src_addr,
992                                        sizeof(ipv4_mask->hdr.src_addr));
993                 ulp_rte_prsr_mask_copy(params, &idx,
994                                        &ipv4_mask->hdr.dst_addr,
995                                        sizeof(ipv4_mask->hdr.dst_addr));
996         }
997         /* Add the number of ipv4 header elements */
998         params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
999
1000         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1001         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1002             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1003                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1004                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1005                 inner_flag = 1;
1006         } else {
1007                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1008                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1009         }
1010
1011         if (proto == IPPROTO_GRE)
1012                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1013
1014         /* Update the field protocol hdr bitmap */
1015         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1016         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1017         return BNXT_TF_RC_SUCCESS;
1018 }
1019
1020 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1021 int32_t
1022 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1023                          struct ulp_rte_parser_params *params)
1024 {
1025         const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1026         const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1027         struct ulp_rte_hdr_field *field;
1028         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1029         uint32_t idx = params->field_idx;
1030         uint32_t size;
1031         uint32_t vtcf, vtcf_mask;
1032         uint8_t proto = 0;
1033         uint32_t inner_flag = 0;
1034         uint32_t cnt;
1035
1036         /* validate there is no 3rd L3 header */
1037         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1038         if (cnt == 2) {
1039                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1040                 return BNXT_TF_RC_ERROR;
1041         }
1042
1043         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1044                               BNXT_ULP_HDR_BIT_O_ETH) &&
1045             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1046                               BNXT_ULP_HDR_BIT_I_ETH)) {
1047                 /* An F2 flow does not include an eth item. When the parser
1048                  * detects an IPv4/IPv6 item that belongs to the outer header
1049                  * (i.e., o_ipv4/o_ipv6) and neither O_ETH nor I_ETH is set,
1050                  * add the offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1051                  * This allows the parser post processor to update the
1052                  * t_dmac in hdr_field[o_eth.dmac].
1053                  */
1054                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1055                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
1056                 params->field_idx = idx;
1057         }
1058
1059         /*
1060          * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1061          * header fields
1062          */
1063         if (ipv6_spec) {
1064                 size = sizeof(ipv6_spec->hdr.vtc_flow);
1065
1066                 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1067                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1068                                                 &vtcf,
1069                                                 size);
1070
1071                 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1072                 field = ulp_rte_parser_fld_copy(field,
1073                                                 &vtcf,
1074                                                 size);
1075
1076                 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1077                 field = ulp_rte_parser_fld_copy(field,
1078                                                 &vtcf,
1079                                                 size);
1080
1081                 size = sizeof(ipv6_spec->hdr.payload_len);
1082                 field = ulp_rte_parser_fld_copy(field,
1083                                                 &ipv6_spec->hdr.payload_len,
1084                                                 size);
1085                 size = sizeof(ipv6_spec->hdr.proto);
1086                 field = ulp_rte_parser_fld_copy(field,
1087                                                 &ipv6_spec->hdr.proto,
1088                                                 size);
1089                 proto = ipv6_spec->hdr.proto;
1090                 size = sizeof(ipv6_spec->hdr.hop_limits);
1091                 field = ulp_rte_parser_fld_copy(field,
1092                                                 &ipv6_spec->hdr.hop_limits,
1093                                                 size);
1094                 size = sizeof(ipv6_spec->hdr.src_addr);
1095                 field = ulp_rte_parser_fld_copy(field,
1096                                                 &ipv6_spec->hdr.src_addr,
1097                                                 size);
1098                 size = sizeof(ipv6_spec->hdr.dst_addr);
1099                 field = ulp_rte_parser_fld_copy(field,
1100                                                 &ipv6_spec->hdr.dst_addr,
1101                                                 size);
1102         }
1103         if (ipv6_mask) {
1104                 size = sizeof(ipv6_mask->hdr.vtc_flow);
1105
1106                 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1107                 ulp_rte_prsr_mask_copy(params, &idx,
1108                                        &vtcf_mask,
1109                                        size);
1110                 /*
1111                  * The TC and flow label fields are ignored since OVS sets
1112                  * them for matching and that is not supported.
1113                  * This is a workaround and
1114                  * shall be addressed in the future.
1115                  */
1116                 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1117                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1118                 vtcf_mask =
1119                         BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1120                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1121
1122                 ulp_rte_prsr_mask_copy(params, &idx,
1123                                        &ipv6_mask->hdr.payload_len,
1124                                        sizeof(ipv6_mask->hdr.payload_len));
1125                 ulp_rte_prsr_mask_copy(params, &idx,
1126                                        &ipv6_mask->hdr.proto,
1127                                        sizeof(ipv6_mask->hdr.proto));
1128                 ulp_rte_prsr_mask_copy(params, &idx,
1129                                        &ipv6_mask->hdr.hop_limits,
1130                                        sizeof(ipv6_mask->hdr.hop_limits));
1131                 ulp_rte_prsr_mask_copy(params, &idx,
1132                                        &ipv6_mask->hdr.src_addr,
1133                                        sizeof(ipv6_mask->hdr.src_addr));
1134                 ulp_rte_prsr_mask_copy(params, &idx,
1135                                        &ipv6_mask->hdr.dst_addr,
1136                                        sizeof(ipv6_mask->hdr.dst_addr));
1137         }
1138         /* add number of ipv6 header elements */
1139         params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1140
1141         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1142         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1143             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1144                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1145                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1146                 inner_flag = 1;
1147         } else {
1148                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1149                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1150         }
1151
1152         if (proto == IPPROTO_GRE)
1153                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1154
1155         /* Update the field protocol hdr bitmap */
1156         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1157         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1158
1159         return BNXT_TF_RC_SUCCESS;
1160 }
1161
1162 /* Function to handle the update of proto header based on field values */
1163 static void
1164 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1165                              uint16_t dst_port)
1166 {
1167         if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1168                 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1169                                BNXT_ULP_HDR_BIT_T_VXLAN);
1170                 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1171         }
1172 }
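/*
 * Note: VXLAN tunnels are recognized here by UDP destination port 4789,
 * while GRE tunnels are flagged in the IPv4/IPv6 handlers when the next
 * protocol field is IPPROTO_GRE.
 */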
1173
1174 /* Function to handle the parsing of RTE Flow item UDP Header. */
1175 int32_t
1176 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1177                         struct ulp_rte_parser_params *params)
1178 {
1179         const struct rte_flow_item_udp *udp_spec = item->spec;
1180         const struct rte_flow_item_udp *udp_mask = item->mask;
1181         struct ulp_rte_hdr_field *field;
1182         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1183         uint32_t idx = params->field_idx;
1184         uint32_t size;
1185         uint16_t dport = 0, sport = 0;
1186         uint32_t cnt;
1187
1188         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1189         if (cnt == 2) {
1190                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1191                 return BNXT_TF_RC_ERROR;
1192         }
1193
1194         /*
1195          * Copy the rte_flow_item for udp into hdr_field using udp
1196          * header fields
1197          */
1198         if (udp_spec) {
1199                 size = sizeof(udp_spec->hdr.src_port);
1200                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1201                                                 &udp_spec->hdr.src_port,
1202                                                 size);
1203                 sport = udp_spec->hdr.src_port;
1204                 size = sizeof(udp_spec->hdr.dst_port);
1205                 field = ulp_rte_parser_fld_copy(field,
1206                                                 &udp_spec->hdr.dst_port,
1207                                                 size);
1208                 dport = udp_spec->hdr.dst_port;
1209                 size = sizeof(udp_spec->hdr.dgram_len);
1210                 field = ulp_rte_parser_fld_copy(field,
1211                                                 &udp_spec->hdr.dgram_len,
1212                                                 size);
1213                 size = sizeof(udp_spec->hdr.dgram_cksum);
1214                 field = ulp_rte_parser_fld_copy(field,
1215                                                 &udp_spec->hdr.dgram_cksum,
1216                                                 size);
1217         }
1218         if (udp_mask) {
1219                 ulp_rte_prsr_mask_copy(params, &idx,
1220                                        &udp_mask->hdr.src_port,
1221                                        sizeof(udp_mask->hdr.src_port));
1222                 ulp_rte_prsr_mask_copy(params, &idx,
1223                                        &udp_mask->hdr.dst_port,
1224                                        sizeof(udp_mask->hdr.dst_port));
1225                 ulp_rte_prsr_mask_copy(params, &idx,
1226                                        &udp_mask->hdr.dgram_len,
1227                                        sizeof(udp_mask->hdr.dgram_len));
1228                 ulp_rte_prsr_mask_copy(params, &idx,
1229                                        &udp_mask->hdr.dgram_cksum,
1230                                        sizeof(udp_mask->hdr.dgram_cksum));
1231         }
1232
1233         /* Add number of UDP header elements */
1234         params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1235
1236         /* Set the udp header bitmap and computed l4 header bitmaps */
1237         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1238             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1239                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1240                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1241                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
1242                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
1243
1244         } else {
1245                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1246                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1247                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
1248                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
1249
1250                 /* Update the field protocol hdr bitmap */
1251                 ulp_rte_l4_proto_type_update(params, dport);
1252         }
1253         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1254         return BNXT_TF_RC_SUCCESS;
1255 }
1256
1257 /* Function to handle the parsing of RTE Flow item TCP Header. */
1258 int32_t
1259 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1260                         struct ulp_rte_parser_params *params)
1261 {
1262         const struct rte_flow_item_tcp *tcp_spec = item->spec;
1263         const struct rte_flow_item_tcp *tcp_mask = item->mask;
1264         struct ulp_rte_hdr_field *field;
1265         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1266         uint32_t idx = params->field_idx;
1267         uint16_t dport = 0, sport = 0;
1268         uint32_t size;
1269         uint32_t cnt;
1270
1271         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1272         if (cnt == 2) {
1273                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1274                 return BNXT_TF_RC_ERROR;
1275         }
1276
1277         /*
1278          * Copy the rte_flow_item for tcp into hdr_field using tcp
1279          * header fields
1280          */
1281         if (tcp_spec) {
1282                 sport = tcp_spec->hdr.src_port;
1283                 size = sizeof(tcp_spec->hdr.src_port);
1284                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1285                                                 &tcp_spec->hdr.src_port,
1286                                                 size);
1287                 dport = tcp_spec->hdr.dst_port;
1288                 size = sizeof(tcp_spec->hdr.dst_port);
1289                 field = ulp_rte_parser_fld_copy(field,
1290                                                 &tcp_spec->hdr.dst_port,
1291                                                 size);
1292                 size = sizeof(tcp_spec->hdr.sent_seq);
1293                 field = ulp_rte_parser_fld_copy(field,
1294                                                 &tcp_spec->hdr.sent_seq,
1295                                                 size);
1296                 size = sizeof(tcp_spec->hdr.recv_ack);
1297                 field = ulp_rte_parser_fld_copy(field,
1298                                                 &tcp_spec->hdr.recv_ack,
1299                                                 size);
1300                 size = sizeof(tcp_spec->hdr.data_off);
1301                 field = ulp_rte_parser_fld_copy(field,
1302                                                 &tcp_spec->hdr.data_off,
1303                                                 size);
1304                 size = sizeof(tcp_spec->hdr.tcp_flags);
1305                 field = ulp_rte_parser_fld_copy(field,
1306                                                 &tcp_spec->hdr.tcp_flags,
1307                                                 size);
1308                 size = sizeof(tcp_spec->hdr.rx_win);
1309                 field = ulp_rte_parser_fld_copy(field,
1310                                                 &tcp_spec->hdr.rx_win,
1311                                                 size);
1312                 size = sizeof(tcp_spec->hdr.cksum);
1313                 field = ulp_rte_parser_fld_copy(field,
1314                                                 &tcp_spec->hdr.cksum,
1315                                                 size);
1316                 size = sizeof(tcp_spec->hdr.tcp_urp);
1317                 field = ulp_rte_parser_fld_copy(field,
1318                                                 &tcp_spec->hdr.tcp_urp,
1319                                                 size);
1320         } else {
1321                 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1322         }
1323
1324         if (tcp_mask) {
1325                 ulp_rte_prsr_mask_copy(params, &idx,
1326                                        &tcp_mask->hdr.src_port,
1327                                        sizeof(tcp_mask->hdr.src_port));
1328                 ulp_rte_prsr_mask_copy(params, &idx,
1329                                        &tcp_mask->hdr.dst_port,
1330                                        sizeof(tcp_mask->hdr.dst_port));
1331                 ulp_rte_prsr_mask_copy(params, &idx,
1332                                        &tcp_mask->hdr.sent_seq,
1333                                        sizeof(tcp_mask->hdr.sent_seq));
1334                 ulp_rte_prsr_mask_copy(params, &idx,
1335                                        &tcp_mask->hdr.recv_ack,
1336                                        sizeof(tcp_mask->hdr.recv_ack));
1337                 ulp_rte_prsr_mask_copy(params, &idx,
1338                                        &tcp_mask->hdr.data_off,
1339                                        sizeof(tcp_mask->hdr.data_off));
1340                 ulp_rte_prsr_mask_copy(params, &idx,
1341                                        &tcp_mask->hdr.tcp_flags,
1342                                        sizeof(tcp_mask->hdr.tcp_flags));
1343                 ulp_rte_prsr_mask_copy(params, &idx,
1344                                        &tcp_mask->hdr.rx_win,
1345                                        sizeof(tcp_mask->hdr.rx_win));
1346                 ulp_rte_prsr_mask_copy(params, &idx,
1347                                        &tcp_mask->hdr.cksum,
1348                                        sizeof(tcp_mask->hdr.cksum));
1349                 ulp_rte_prsr_mask_copy(params, &idx,
1350                                        &tcp_mask->hdr.tcp_urp,
1351                                        sizeof(tcp_mask->hdr.tcp_urp));
1352         }
1353         /* add number of TCP header elements */
1354         params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1355
1356         /* Set the tcp header bitmap and computed l4 header bitmaps */
1357         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1358             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1359                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1360                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1361                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
1362                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
1363         } else {
1364                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1365                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1366                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
1367                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
1368         }
1369         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1370         return BNXT_TF_RC_SUCCESS;
1371 }
1372
1373 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1374 int32_t
1375 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1376                           struct ulp_rte_parser_params *params)
1377 {
1378         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1379         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1380         struct ulp_rte_hdr_field *field;
1381         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1382         uint32_t idx = params->field_idx;
1383         uint32_t size;
1384
1385         /*
1386          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1387          * header fields
1388          */
1389         if (vxlan_spec) {
1390                 size = sizeof(vxlan_spec->flags);
1391                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1392                                                 &vxlan_spec->flags,
1393                                                 size);
1394                 size = sizeof(vxlan_spec->rsvd0);
1395                 field = ulp_rte_parser_fld_copy(field,
1396                                                 &vxlan_spec->rsvd0,
1397                                                 size);
1398                 size = sizeof(vxlan_spec->vni);
1399                 field = ulp_rte_parser_fld_copy(field,
1400                                                 &vxlan_spec->vni,
1401                                                 size);
1402                 size = sizeof(vxlan_spec->rsvd1);
1403                 field = ulp_rte_parser_fld_copy(field,
1404                                                 &vxlan_spec->rsvd1,
1405                                                 size);
1406         }
1407         if (vxlan_mask) {
1408                 ulp_rte_prsr_mask_copy(params, &idx,
1409                                        &vxlan_mask->flags,
1410                                        sizeof(vxlan_mask->flags));
1411                 ulp_rte_prsr_mask_copy(params, &idx,
1412                                        &vxlan_mask->rsvd0,
1413                                        sizeof(vxlan_mask->rsvd0));
1414                 ulp_rte_prsr_mask_copy(params, &idx,
1415                                        &vxlan_mask->vni,
1416                                        sizeof(vxlan_mask->vni));
1417                 ulp_rte_prsr_mask_copy(params, &idx,
1418                                        &vxlan_mask->rsvd1,
1419                                        sizeof(vxlan_mask->rsvd1));
1420         }
1421         /* Add number of vxlan header elements */
1422         params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1423
1424         /* Update the hdr_bitmap with vxlan */
1425         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1426         return BNXT_TF_RC_SUCCESS;
1427 }
1428
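/*
 * Illustrative sketch (assumption, not driver code): a typical caller
 * matches only the 24-bit VNI and wildcards the remaining VXLAN fields,
 * e.g. VNI 42:
 *
 *	struct rte_flow_item_vxlan vxlan_spec_ex = {
 *		.vni = { 0x00, 0x00, 0x2a },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask_ex = {
 *		.vni = { 0xff, 0xff, 0xff },
 *	};
 */
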
1429 /* Function to handle the parsing of RTE Flow item GRE Header. */
1430 int32_t
1431 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1432                         struct ulp_rte_parser_params *params)
1433 {
1434         const struct rte_flow_item_gre *gre_spec = item->spec;
1435         const struct rte_flow_item_gre *gre_mask = item->mask;
1436         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1437         uint32_t idx = params->field_idx;
1438         uint32_t size;
1439         struct ulp_rte_hdr_field *field;
1440
1441         if (!gre_spec && !gre_mask) {
1442                 BNXT_TF_DBG(ERR, "Parse Error: GRE item is invalid\n");
1443                 return BNXT_TF_RC_ERROR;
1444         }
1445
1446         if (gre_spec) {
1447                 size = sizeof(gre_spec->c_rsvd0_ver);
1448                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1449                                                 &gre_spec->c_rsvd0_ver,
1450                                                 size);
1451                 size = sizeof(gre_spec->protocol);
1452                 field = ulp_rte_parser_fld_copy(field,
1453                                                 &gre_spec->protocol,
1454                                                 size);
1455         }
1456         if (gre_mask) {
1457                 ulp_rte_prsr_mask_copy(params, &idx,
1458                                        &gre_mask->c_rsvd0_ver,
1459                                        sizeof(gre_mask->c_rsvd0_ver));
1460                 ulp_rte_prsr_mask_copy(params, &idx,
1461                                        &gre_mask->protocol,
1462                                        sizeof(gre_mask->protocol));
1463         }
1464         /* Add number of GRE header elements */
1465         params->field_idx += BNXT_ULP_PROTO_HDR_GRE_NUM;
1466
1467         /* Update the hdr_bitmap with GRE */
1468         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1469         return BNXT_TF_RC_SUCCESS;
1470 }
1471
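/*
 * Illustrative sketch (assumption, not driver code): a minimal GRE tunnel
 * pattern that exercises the handler above.  Field names mirror
 * struct rte_flow_item_gre (c_rsvd0_ver, protocol):
 *
 *	static const struct rte_flow_item_gre gre_mask_ex = {
 *		.protocol = RTE_BE16(0xffff),
 *	};
 *	static const struct rte_flow_item gre_pattern_ex[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_GRE, .mask = &gre_mask_ex },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
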
1472 /* Function to handle the parsing of RTE Flow item ANY. */
1473 int32_t
1474 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1475                          struct ulp_rte_parser_params *params __rte_unused)
1476 {
1477         return BNXT_TF_RC_SUCCESS;
1478 }
1479
1480 /* Function to handle the parsing of RTE Flow item void Header */
1481 int32_t
1482 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1483                          struct ulp_rte_parser_params *params __rte_unused)
1484 {
1485         return BNXT_TF_RC_SUCCESS;
1486 }
1487
1488 /* Function to handle the parsing of RTE Flow action void Header. */
1489 int32_t
1490 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1491                          struct ulp_rte_parser_params *params __rte_unused)
1492 {
1493         return BNXT_TF_RC_SUCCESS;
1494 }
1495
1496 /* Function to handle the parsing of RTE Flow action Mark Header. */
1497 int32_t
1498 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1499                          struct ulp_rte_parser_params *param)
1500 {
1501         const struct rte_flow_action_mark *mark;
1502         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1503         uint32_t mark_id;
1504
1505         mark = action_item->conf;
1506         if (mark) {
1507                 mark_id = tfp_cpu_to_be_32(mark->id);
1508                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1509                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1510
1511                 /* Update the act_bitmap with mark */
1512                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1513                 return BNXT_TF_RC_SUCCESS;
1514         }
1515         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1516         return BNXT_TF_RC_ERROR;
1517 }
1518
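/*
 * Illustrative sketch (assumption): a MARK action whose id the handler
 * above byte swaps into act_details[BNXT_ULP_ACT_PROP_IDX_MARK]:
 *
 *	struct rte_flow_action_mark mark_conf_ex = { .id = 0x1234 };
 *	struct rte_flow_action mark_act_ex = {
 *		.type = RTE_FLOW_ACTION_TYPE_MARK,
 *		.conf = &mark_conf_ex,
 *	};
 */
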
1519 /* Function to handle the parsing of RTE Flow action RSS Header. */
1520 int32_t
1521 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1522                         struct ulp_rte_parser_params *param)
1523 {
1524         const struct rte_flow_action_rss *rss = action_item->conf;
1525
1526         if (rss) {
1527                 /* Update the act_bitmap with rss */
1528                 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1529                 return BNXT_TF_RC_SUCCESS;
1530         }
1531         BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1532         return BNXT_TF_RC_ERROR;
1533 }
1534
1535 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1536 int32_t
1537 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1538                                 struct ulp_rte_parser_params *params)
1539 {
1540         const struct rte_flow_action_vxlan_encap *vxlan_encap;
1541         const struct rte_flow_item *item;
1542         const struct rte_flow_item_eth *eth_spec;
1543         const struct rte_flow_item_ipv4 *ipv4_spec;
1544         const struct rte_flow_item_ipv6 *ipv6_spec;
1545         struct rte_flow_item_vxlan vxlan_spec;
1546         uint32_t vlan_num = 0, vlan_size = 0;
1547         uint32_t ip_size = 0, ip_type = 0;
1548         uint32_t vxlan_size = 0;
1549         uint8_t *buff;
1550         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1551         const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1552                                     0x00, 0x40, 0x11};
1553         /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1554         const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1555                                 0x00, 0x11, 0xf6};
1556         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1557         struct ulp_rte_act_prop *ap = &params->act_prop;
1558         const uint8_t *tmp_buff;
1559
1560         vxlan_encap = action_item->conf;
1561         if (!vxlan_encap) {
1562                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1563                 return BNXT_TF_RC_ERROR;
1564         }
1565
1566         item = vxlan_encap->definition;
1567         if (!item) {
1568                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1569                 return BNXT_TF_RC_ERROR;
1570         }
1571
1572         if (!ulp_rte_item_skip_void(&item, 0))
1573                 return BNXT_TF_RC_ERROR;
1574
1575         /* must have ethernet header */
1576         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1577                 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1578                 return BNXT_TF_RC_ERROR;
1579         }
1580         eth_spec = item->spec;
1581         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1582         ulp_encap_buffer_copy(buff,
1583                               eth_spec->dst.addr_bytes,
1584                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1585                               ULP_BUFFER_ALIGN_8_BYTE);
1586
1587         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1588         ulp_encap_buffer_copy(buff,
1589                               eth_spec->src.addr_bytes,
1590                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1591                               ULP_BUFFER_ALIGN_8_BYTE);
1592
1593         /* Goto the next item */
1594         if (!ulp_rte_item_skip_void(&item, 1))
1595                 return BNXT_TF_RC_ERROR;
1596
1597         /* May have vlan header */
1598         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1599                 vlan_num++;
1600                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1601                 ulp_encap_buffer_copy(buff,
1602                                       item->spec,
1603                                       sizeof(struct rte_flow_item_vlan),
1604                                       ULP_BUFFER_ALIGN_8_BYTE);
1605
1606                 if (!ulp_rte_item_skip_void(&item, 1))
1607                         return BNXT_TF_RC_ERROR;
1608         }
1609
1610         /* may have two vlan headers */
1611         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1612                 vlan_num++;
1613                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1614                        sizeof(struct rte_flow_item_vlan)],
1615                        item->spec,
1616                        sizeof(struct rte_flow_item_vlan));
1617                 if (!ulp_rte_item_skip_void(&item, 1))
1618                         return BNXT_TF_RC_ERROR;
1619         }
1620         /* Update the vlan count and size if any vlan headers were found */
1621         if (vlan_num) {
1622                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1623                 vlan_num = tfp_cpu_to_be_32(vlan_num);
1624                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1625                        &vlan_num,
1626                        sizeof(uint32_t));
1627                 vlan_size = tfp_cpu_to_be_32(vlan_size);
1628                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1629                        &vlan_size,
1630                        sizeof(uint32_t));
1631         }
1632
1633         /* L3 must be IPv4, IPv6 */
1634         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1635                 ipv4_spec = item->spec;
1636                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1637
1638                 /* copy the ipv4 details */
1639                 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1640                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1641                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1642                         ulp_encap_buffer_copy(buff,
1643                                               def_ipv4_hdr,
1644                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1645                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1646                                               ULP_BUFFER_ALIGN_8_BYTE);
1647                 } else {
1648                         /* Total length being ignored in the ip hdr. */
1649                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1650                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1651                         ulp_encap_buffer_copy(buff,
1652                                               tmp_buff,
1653                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1654                                               ULP_BUFFER_ALIGN_8_BYTE);
1655                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1656                              BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1657                         ulp_encap_buffer_copy(buff,
1658                                               &ipv4_spec->hdr.version_ihl,
1659                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1660                                               ULP_BUFFER_ALIGN_8_BYTE);
1661                 }
1662
1663                 /* Update the dst ip address in ip encap buffer */
1664                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1665                     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1666                     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1667                 ulp_encap_buffer_copy(buff,
1668                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1669                                       sizeof(ipv4_spec->hdr.dst_addr),
1670                                       ULP_BUFFER_ALIGN_8_BYTE);
1671
1672                 /* Update the src ip address */
1673                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1674                         BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1675                         sizeof(ipv4_spec->hdr.src_addr)];
1676                 ulp_encap_buffer_copy(buff,
1677                                       (const uint8_t *)&ipv4_spec->hdr.src_addr,
1678                                       sizeof(ipv4_spec->hdr.src_addr),
1679                                       ULP_BUFFER_ALIGN_8_BYTE);
1680
1681                 /* Update the ip size details */
1682                 ip_size = tfp_cpu_to_be_32(ip_size);
1683                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1684                        &ip_size, sizeof(uint32_t));
1685
1686                 /* update the ip type */
1687                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1688                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1689                        &ip_type, sizeof(uint32_t));
1690
1691                 /* update the computed field to notify it is ipv4 header */
1692                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1693                                     1);
1694
1695                 if (!ulp_rte_item_skip_void(&item, 1))
1696                         return BNXT_TF_RC_ERROR;
1697         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1698                 ipv6_spec = item->spec;
1699                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1700
1701                 /* copy the ipv6 details */
1702                 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1703                 if (ulp_buffer_is_empty(tmp_buff,
1704                                         BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1705                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1706                         ulp_encap_buffer_copy(buff,
1707                                               def_ipv6_hdr,
1708                                               sizeof(def_ipv6_hdr),
1709                                               ULP_BUFFER_ALIGN_8_BYTE);
1710                 } else {
1711                         /* The payload length being ignored in the ip hdr. */
1712                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1713                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1714                         ulp_encap_buffer_copy(buff,
1715                                               tmp_buff,
1716                                               BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1717                                               ULP_BUFFER_ALIGN_8_BYTE);
1718                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1719                                 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1720                                 BNXT_ULP_ENCAP_IPV6_DO];
1721                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1722                         ulp_encap_buffer_copy(buff,
1723                                               tmp_buff,
1724                                               BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1725                                               ULP_BUFFER_ALIGN_8_BYTE);
1726                 }
1727                 /* Update the dst ip address in ip encap buffer */
1728                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1729                         sizeof(def_ipv6_hdr)];
1730                 ulp_encap_buffer_copy(buff,
1731                                       (const uint8_t *)ipv6_spec->hdr.dst_addr,
1732                                       sizeof(ipv6_spec->hdr.dst_addr),
1733                                       ULP_BUFFER_ALIGN_8_BYTE);
1734
1735                 /* Update the src ip address */
1736                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1737                 ulp_encap_buffer_copy(buff,
1738                                       (const uint8_t *)ipv6_spec->hdr.src_addr,
1739                                       sizeof(ipv6_spec->hdr.src_addr),
1740                                       ULP_BUFFER_ALIGN_16_BYTE);
1741
1742                 /* Update the ip size details */
1743                 ip_size = tfp_cpu_to_be_32(ip_size);
1744                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1745                        &ip_size, sizeof(uint32_t));
1746
1747                 /* update the ip type */
1748                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1749                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1750                        &ip_type, sizeof(uint32_t));
1751
1752                 /* update the computed field to notify it is ipv6 header */
1753                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1754                                     1);
1755
1756                 if (!ulp_rte_item_skip_void(&item, 1))
1757                         return BNXT_TF_RC_ERROR;
1758         } else {
1759                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1760                 return BNXT_TF_RC_ERROR;
1761         }
1762
1763         /* L4 is UDP */
1764         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1765                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1766                 return BNXT_TF_RC_ERROR;
1767         }
1768         /* copy the udp details */
1769         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1770                               item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1771                               ULP_BUFFER_ALIGN_8_BYTE);
1772
1773         if (!ulp_rte_item_skip_void(&item, 1))
1774                 return BNXT_TF_RC_ERROR;
1775
1776         /* Finally VXLAN */
1777         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1778                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1779                 return BNXT_TF_RC_ERROR;
1780         }
1781         vxlan_size = sizeof(struct rte_flow_item_vxlan);
1782         /* copy the vxlan details */
1783         memcpy(&vxlan_spec, item->spec, vxlan_size);
1784         vxlan_spec.flags = 0x08;
1785         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1786         if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1787                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1788                                       vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1789         } else {
1790                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1791                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1792                 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1793                                       (const uint8_t *)&vxlan_spec.vni,
1794                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1795         }
1796         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1797         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1798                &vxlan_size, sizeof(uint32_t));
1799
1800         /* Update the act_bitmap with vxlan encap */
1801         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1802         return BNXT_TF_RC_SUCCESS;
1803 }
1804
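/*
 * Illustrative sketch (assumption, not driver code): the item sequence the
 * handler above walks inside rte_flow_action_vxlan_encap.definition is
 * ETH [/ VLAN [/ VLAN]] / IPV4-or-IPV6 / UDP / VXLAN / END, e.g.:
 *
 *	struct rte_flow_item encap_def_ex[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth_spec_ex },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ipv4_spec_ex },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp_spec_ex },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec_ex },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vxlan_encap encap_conf_ex = {
 *		.definition = encap_def_ex,
 *	};
 *
 * (eth_spec_ex and friends are hypothetical, caller-provided specs.)
 */
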
1805 /* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
1806 int32_t
1807 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1808                                 __rte_unused,
1809                                 struct ulp_rte_parser_params *params)
1810 {
1811         /* Update the act_bitmap with vxlan decap */
1812         ULP_BITMAP_SET(params->act_bitmap.bits,
1813                        BNXT_ULP_ACT_BIT_VXLAN_DECAP);
1814         /* Update computational field with tunnel decap info */
1815         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1816         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1817         return BNXT_TF_RC_SUCCESS;
1818 }
1819
1820 /* Function to handle the parsing of RTE Flow action drop Header. */
1821 int32_t
1822 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1823                          struct ulp_rte_parser_params *params)
1824 {
1825         /* Update the act_bitmap with drop */
1826         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
1827         return BNXT_TF_RC_SUCCESS;
1828 }
1829
1830 /* Function to handle the parsing of RTE Flow action count. */
1831 int32_t
1832 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1833                           struct ulp_rte_parser_params *params)
1834
1835 {
1836         const struct rte_flow_action_count *act_count;
1837         struct ulp_rte_act_prop *act_prop = &params->act_prop;
1838
1839         act_count = action_item->conf;
1840         if (act_count) {
1841                 if (act_count->shared) {
1842                         BNXT_TF_DBG(ERR,
1843                                     "Parse Error:Shared count not supported\n");
1844                         return BNXT_TF_RC_PARSE_ERR;
1845                 }
1846                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1847                        &act_count->id,
1848                        BNXT_ULP_ACT_PROP_SZ_COUNT);
1849         }
1850
1851         /* Update the act_bitmap with count */
1852         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
1853         return BNXT_TF_RC_SUCCESS;
1854 }
1855
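/*
 * Illustrative sketch (assumption): only per-flow (non-shared) counters are
 * accepted by the handler above:
 *
 *	struct rte_flow_action_count count_conf_ex = { .id = 0 };
 *	struct rte_flow_action count_act_ex = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		.conf = &count_conf_ex,
 *	};
 */
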
1856 /* Function to handle the parsing of action ports. */
1857 static int32_t
1858 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1859                             uint32_t ifindex)
1860 {
1861         enum bnxt_ulp_direction_type dir;
1862         uint16_t pid_s;
1863         uint32_t pid;
1864         struct ulp_rte_act_prop *act = &param->act_prop;
1865         enum bnxt_ulp_intf_type port_type;
1866         uint32_t vnic_type;
1867
1868         /* Get the direction */
1869         dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1870         if (dir == BNXT_ULP_DIR_EGRESS) {
1871                 /* For egress direction, fill vport */
1872                 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1873                         return BNXT_TF_RC_ERROR;
1874
1875                 pid = pid_s;
1876                 pid = rte_cpu_to_be_32(pid);
1877                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1878                        &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1879         } else {
1880                 /* For ingress direction, fill vnic */
1881                 port_type = ULP_COMP_FLD_IDX_RD(param,
1882                                                 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1883                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1884                         vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1885                 else
1886                         vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1887
1888                 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1889                                                  vnic_type, &pid_s))
1890                         return BNXT_TF_RC_ERROR;
1891
1892                 pid = pid_s;
1893                 pid = rte_cpu_to_be_32(pid);
1894                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1895                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1896         }
1897
1898         /* Update the action port set bit */
1899         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1900         return BNXT_TF_RC_SUCCESS;
1901 }
1902
1903 /* Function to handle the parsing of RTE Flow action PF. */
1904 int32_t
1905 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1906                        struct ulp_rte_parser_params *params)
1907 {
1908         uint32_t port_id;
1909         uint32_t ifindex;
1910         enum bnxt_ulp_intf_type intf_type;
1911
1912         /* Get the port id of the current device */
1913         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1914
1915         /* Get the port db ifindex */
1916         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1917                                               &ifindex)) {
1918                 BNXT_TF_DBG(ERR, "Invalid port id\n");
1919                 return BNXT_TF_RC_ERROR;
1920         }
1921
1922         /* Check that the port is a PF port */
1923         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1924         if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1925                 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1926                 return BNXT_TF_RC_ERROR;
1927         }
1928         /* Update the action properties */
1929         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1930         return ulp_rte_parser_act_port_set(params, ifindex);
1931 }
1932
1933 /* Function to handle the parsing of RTE Flow action VF. */
1934 int32_t
1935 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1936                        struct ulp_rte_parser_params *params)
1937 {
1938         const struct rte_flow_action_vf *vf_action;
1939         enum bnxt_ulp_intf_type intf_type;
1940         uint32_t ifindex;
1941         struct bnxt *bp;
1942
1943         vf_action = action_item->conf;
1944         if (!vf_action) {
1945                 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1946                 return BNXT_TF_RC_PARSE_ERR;
1947         }
1948
1949         if (vf_action->original) {
1950                 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1951                 return BNXT_TF_RC_PARSE_ERR;
1952         }
1953
1954         bp = bnxt_get_bp(params->port_id);
1955         if (bp == NULL) {
1956                 BNXT_TF_DBG(ERR, "Invalid bp\n");
1957                 return BNXT_TF_RC_ERROR;
1958         }
1959
1960         /* vf_action->id is a logical number which in this case is an
1961          * offset from the first VF. So, to get the absolute VF id, the
1962          * offset must be added to the absolute first vf id of that port.
1963          */
1964         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
1965                                                  bp->first_vf_id + vf_action->id,
1966                                                  &ifindex)) {
1967                 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1968                 return BNXT_TF_RC_ERROR;
1969         }
1970         /* Check that the port is a VF port */
1971         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1972         if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1973             intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1974                 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1975                 return BNXT_TF_RC_ERROR;
1976         }
1977
1978         /* Update the action properties */
1979         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1980         return ulp_rte_parser_act_port_set(params, ifindex);
1981 }
1982
1983 /* Function to handle the parsing of RTE Flow action port_id. */
1984 int32_t
1985 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1986                             struct ulp_rte_parser_params *param)
1987 {
1988         const struct rte_flow_action_port_id *port_id = act_item->conf;
1989         uint32_t ifindex;
1990         enum bnxt_ulp_intf_type intf_type;
1991
1992         if (!port_id) {
1993                 BNXT_TF_DBG(ERR,
1994                             "ParseErr: Invalid Argument\n");
1995                 return BNXT_TF_RC_PARSE_ERR;
1996         }
1997         if (port_id->original) {
1998                 BNXT_TF_DBG(ERR,
1999                             "ParseErr:Portid Original not supported\n");
2000                 return BNXT_TF_RC_PARSE_ERR;
2001         }
2002
2003         /* Get the port db ifindex */
2004         if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2005                                               &ifindex)) {
2006                 BNXT_TF_DBG(ERR, "Invalid port id\n");
2007                 return BNXT_TF_RC_ERROR;
2008         }
2009
2010         /* Get the intf type */
2011         intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2012         if (!intf_type) {
2013                 BNXT_TF_DBG(ERR, "Invalid port type\n");
2014                 return BNXT_TF_RC_ERROR;
2015         }
2016
2017         /* Set the action port */
2018         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2019         return ulp_rte_parser_act_port_set(param, ifindex);
2020 }
2021
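/*
 * Illustrative sketch (assumption): redirecting to another DPDK port id;
 * 'original' must stay 0 for the handler above to accept it:
 *
 *	struct rte_flow_action_port_id pid_conf_ex = { .id = 1 };
 *	struct rte_flow_action pid_act_ex = {
 *		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *		.conf = &pid_conf_ex,
 *	};
 */
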
2022 /* Function to handle the parsing of RTE Flow action phy_port. */
2023 int32_t
2024 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2025                              struct ulp_rte_parser_params *prm)
2026 {
2027         const struct rte_flow_action_phy_port *phy_port;
2028         uint32_t pid;
2029         int32_t rc;
2030         uint16_t pid_s;
2031         enum bnxt_ulp_direction_type dir;
2032
2033         phy_port = action_item->conf;
2034         if (!phy_port) {
2035                 BNXT_TF_DBG(ERR,
2036                             "ParseErr: Invalid Argument\n");
2037                 return BNXT_TF_RC_PARSE_ERR;
2038         }
2039
2040         if (phy_port->original) {
2041                 BNXT_TF_DBG(ERR,
2042                             "Parse Err:Port Original not supported\n");
2043                 return BNXT_TF_RC_PARSE_ERR;
2044         }
2045         dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2046         if (dir != BNXT_ULP_DIR_EGRESS) {
2047                 BNXT_TF_DBG(ERR,
2048                             "Parse Err:Phy ports are valid only for egress\n");
2049                 return BNXT_TF_RC_PARSE_ERR;
2050         }
2051         /* Get the physical port details from port db */
2052         rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2053                                             &pid_s);
2054         if (rc) {
2055                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2056                 return -EINVAL;
2057         }
2058
2059         pid = pid_s;
2060         pid = rte_cpu_to_be_32(pid);
2061         memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2062                &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2063
2064         /* Update the action port set bit */
2065         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2066         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2067                             BNXT_ULP_INTF_TYPE_PHY_PORT);
2068         return BNXT_TF_RC_SUCCESS;
2069 }
2070
2071 /* Function to handle the parsing of RTE Flow action pop vlan. */
2072 int32_t
2073 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2074                                 struct ulp_rte_parser_params *params)
2075 {
2076         /* Update the act_bitmap with pop */
2077         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2078         return BNXT_TF_RC_SUCCESS;
2079 }
2080
2081 /* Function to handle the parsing of RTE Flow action push vlan. */
2082 int32_t
2083 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2084                                  struct ulp_rte_parser_params *params)
2085 {
2086         const struct rte_flow_action_of_push_vlan *push_vlan;
2087         uint16_t ethertype;
2088         struct ulp_rte_act_prop *act = &params->act_prop;
2089
2090         push_vlan = action_item->conf;
2091         if (push_vlan) {
2092                 ethertype = push_vlan->ethertype;
2093                 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2094                         BNXT_TF_DBG(ERR,
2095                                     "Parse Err: Ethertype not supported\n");
2096                         return BNXT_TF_RC_PARSE_ERR;
2097                 }
2098                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2099                        &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2100                 /* Update the act_bitmap with push vlan */
2101                 ULP_BITMAP_SET(params->act_bitmap.bits,
2102                                BNXT_ULP_ACT_BIT_PUSH_VLAN);
2103                 return BNXT_TF_RC_SUCCESS;
2104         }
2105         BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2106         return BNXT_TF_RC_ERROR;
2107 }
2108
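/*
 * Illustrative sketch (assumption): only the 802.1Q ethertype passes the
 * check in the handler above:
 *
 *	struct rte_flow_action_of_push_vlan push_conf_ex = {
 *		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *	};
 */
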
2109 /* Function to handle the parsing of RTE Flow action set vlan id. */
2110 int32_t
2111 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2112                                     struct ulp_rte_parser_params *params)
2113 {
2114         const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2115         uint32_t vid;
2116         struct ulp_rte_act_prop *act = &params->act_prop;
2117
2118         vlan_vid = action_item->conf;
2119         if (vlan_vid && vlan_vid->vlan_vid) {
2120                 vid = vlan_vid->vlan_vid;
2121                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2122                        &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2123                 /* Update the act_bitmap with set vlan vid */
2124                 ULP_BITMAP_SET(params->act_bitmap.bits,
2125                                BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2126                 return BNXT_TF_RC_SUCCESS;
2127         }
2128         BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2129         return BNXT_TF_RC_ERROR;
2130 }
2131
2132 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2133 int32_t
2134 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2135                                     struct ulp_rte_parser_params *params)
2136 {
2137         const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2138         uint8_t pcp;
2139         struct ulp_rte_act_prop *act = &params->act_prop;
2140
2141         vlan_pcp = action_item->conf;
2142         if (vlan_pcp) {
2143                 pcp = vlan_pcp->vlan_pcp;
2144                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2145                        &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2146                 /* Update the act_bitmap with set vlan pcp */
2147                 ULP_BITMAP_SET(params->act_bitmap.bits,
2148                                BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2149                 return BNXT_TF_RC_SUCCESS;
2150         }
2151         BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2152         return BNXT_TF_RC_ERROR;
2153 }
2154
2155 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2156 int32_t
2157 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2158                                  struct ulp_rte_parser_params *params)
2159 {
2160         const struct rte_flow_action_set_ipv4 *set_ipv4;
2161         struct ulp_rte_act_prop *act = &params->act_prop;
2162
2163         set_ipv4 = action_item->conf;
2164         if (set_ipv4) {
2165                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2166                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2167                 /* Update the act_bitmap with set ipv4 src */
2168                 ULP_BITMAP_SET(params->act_bitmap.bits,
2169                                BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2170                 return BNXT_TF_RC_SUCCESS;
2171         }
2172         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2173         return BNXT_TF_RC_ERROR;
2174 }
2175
2176 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2177 int32_t
2178 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2179                                  struct ulp_rte_parser_params *params)
2180 {
2181         const struct rte_flow_action_set_ipv4 *set_ipv4;
2182         struct ulp_rte_act_prop *act = &params->act_prop;
2183
2184         set_ipv4 = action_item->conf;
2185         if (set_ipv4) {
2186                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2187                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2188                 /* Update the act_bitmap with set ipv4 dst */
2189                 ULP_BITMAP_SET(params->act_bitmap.bits,
2190                                BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2191                 return BNXT_TF_RC_SUCCESS;
2192         }
2193         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2194         return BNXT_TF_RC_ERROR;
2195 }
2196
2197 /* Function to handle the parsing of RTE Flow action set tp src.*/
2198 int32_t
2199 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2200                                struct ulp_rte_parser_params *params)
2201 {
2202         const struct rte_flow_action_set_tp *set_tp;
2203         struct ulp_rte_act_prop *act = &params->act_prop;
2204
2205         set_tp = action_item->conf;
2206         if (set_tp) {
2207                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2208                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2209                 /* Update the act_bitmap with set tp src */
2210                 ULP_BITMAP_SET(params->act_bitmap.bits,
2211                                BNXT_ULP_ACT_BIT_SET_TP_SRC);
2212                 return BNXT_TF_RC_SUCCESS;
2213         }
2214
2215         BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2216         return BNXT_TF_RC_ERROR;
2217 }
2218
2219 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2220 int32_t
2221 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2222                                struct ulp_rte_parser_params *params)
2223 {
2224         const struct rte_flow_action_set_tp *set_tp;
2225         struct ulp_rte_act_prop *act = &params->act_prop;
2226
2227         set_tp = action_item->conf;
2228         if (set_tp) {
2229                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2230                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2231                 /* Update the act_bitmap with set tp dst */
2232                 ULP_BITMAP_SET(params->act_bitmap.bits,
2233                                BNXT_ULP_ACT_BIT_SET_TP_DST);
2234                 return BNXT_TF_RC_SUCCESS;
2235         }
2236
2237         BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
2238         return BNXT_TF_RC_ERROR;
2239 }
2240
2241 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2242 int32_t
2243 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2244                             struct ulp_rte_parser_params *params)
2245 {
2246         /* Update the act_bitmap with dec ttl */
2247         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2248         return BNXT_TF_RC_SUCCESS;
2249 }
2250
2251 /* Function to handle the parsing of RTE Flow action JUMP */
2252 int32_t
2253 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2254                          struct ulp_rte_parser_params *params)
2255 {
2256         /* Update the act_bitmap with jump */
2257         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2258         return BNXT_TF_RC_SUCCESS;
2259 }