1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
14 #include "tfp.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
18 #include "ulp_tun.h"
19 #include "ulp_template_db_tbl.h"
20
21 /* Local defines for the parsing functions */
22 #define ULP_VLAN_PRIORITY_SHIFT         13 /* First 3 bits */
23 #define ULP_VLAN_PRIORITY_MASK          0x700
24 #define ULP_VLAN_TAG_MASK               0xFFF /* Last 12 bits */
25 #define ULP_UDP_PORT_VXLAN              4789
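/*
 * Note: an 802.1Q TCI is 16 bits wide: a 3-bit priority (PCP) in the top
 * bits, a 1-bit DEI, and a 12-bit VLAN ID in the low bits.  The shift and
 * masks above carve up the TCI accordingly, and 4789 is the IANA-assigned
 * VXLAN UDP port used below to recognize VXLAN encapsulation.
 */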
26
27 /* Utility function to skip the void items. */
28 static inline int32_t
29 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
30 {
31         if (!*item)
32                 return 0;
33         if (increment)
34                 (*item)++;
35         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
36                 (*item)++;
37         if (*item)
38                 return 1;
39         return 0;
40 }
41
42 /* Utility function to update the field_bitmap */
43 static void
44 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
45                                    uint32_t idx)
46 {
47         struct ulp_rte_hdr_field *field;
48
49         field = &params->hdr_field[idx];
50         if (ulp_bitmap_notzero(field->mask, field->size)) {
51                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
52                 /* Not exact match */
53                 if (!ulp_bitmap_is_ones(field->mask, field->size))
54                         ULP_COMP_FLD_IDX_WR(params,
55                                             BNXT_ULP_CF_IDX_WC_MATCH, 1);
56         } else {
57                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
58         }
59 }
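/*
 * Note: a field whose mask is non-zero but not all ones is flagged as a
 * wildcard match above.  This is why the VLAN handler below widens partial
 * priority/tag masks to full 16-bit masks, and why several handlers ignore
 * unsupported masks via ulp_rte_prsr_mask_ignore() instead of copying them.
 */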
60
61 /* Utility function to copy field spec items */
62 static struct ulp_rte_hdr_field *
63 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
64                         const void *buffer,
65                         uint32_t size)
66 {
67         field->size = size;
68         memcpy(field->spec, buffer, field->size);
69         field++;
70         return field;
71 }
72
73 /* Utility function to copy field mask items */
74 static void
75 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
76                        uint32_t *idx,
77                        const void *buffer,
78                        uint32_t size)
79 {
80         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
81
82         memcpy(field->mask, buffer, size);
83         ulp_rte_parser_field_bitmap_update(params, *idx);
84         *idx = *idx + 1;
85 }
86
87 /* Utility function to ignore field mask items */
88 static void
89 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
90                          uint32_t *idx,
91                          const void *buffer __rte_unused,
92                          uint32_t size __rte_unused)
93 {
94         *idx = *idx + 1;
95 }
96
97 /*
98  * Function to handle the parsing of RTE Flows and placing
99  * the RTE flow items into the ulp structures.
100  */
101 int32_t
102 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
103                               struct ulp_rte_parser_params *params)
104 {
105         const struct rte_flow_item *item = pattern;
106         struct bnxt_ulp_rte_hdr_info *hdr_info;
107
108         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
109
110         /* Set the computed flags for no vlan tags before parsing */
111         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
112         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
113
114         /* Parse all the items in the pattern */
115         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
116                 /* get the header information from the ulp_hdr_info table */
117                 hdr_info = &ulp_hdr_info[item->type];
118                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
119                         BNXT_TF_DBG(ERR,
120                                     "Truflow parser does not support type %d\n",
121                                     item->type);
122                         return BNXT_TF_RC_PARSE_ERR;
123                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
124                         /* call the registered callback handler */
125                         if (hdr_info->proto_hdr_func) {
126                                 if (hdr_info->proto_hdr_func(item, params) !=
127                                     BNXT_TF_RC_SUCCESS) {
128                                         return BNXT_TF_RC_ERROR;
129                                 }
130                         }
131                 }
132                 item++;
133         }
134         /* update the implied SVIF */
135         return ulp_rte_parser_implicit_match_port_process(params);
136 }
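/*
 * Illustrative example: for a pattern such as
 *     ETH / VLAN / IPV4 / UDP / END
 * the loop above walks the items one by one and invokes the registered
 * proto_hdr_func for each supported type (for instance
 * ulp_rte_eth_hdr_handler(), ulp_rte_vlan_hdr_handler(),
 * ulp_rte_ipv4_hdr_handler() and ulp_rte_udp_hdr_handler() below),
 * stopping at the END item before the implicit SVIF match is appended.
 */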
137
138 /*
139  * Function to handle the parsing of RTE Flows and placing
140  * the RTE flow actions into the ulp structures.
141  */
142 int32_t
143 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
144                               struct ulp_rte_parser_params *params)
145 {
146         const struct rte_flow_action *action_item = actions;
147         struct bnxt_ulp_rte_act_info *hdr_info;
148
149         /* Parse all the actions in the action list */
150         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
151                 /* get the action information from the ulp_act_info table */
152                 hdr_info = &ulp_act_info[action_item->type];
153                 if (hdr_info->act_type ==
154                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
155                         BNXT_TF_DBG(ERR,
156                                     "Truflow parser does not support act %u\n",
157                                     action_item->type);
158                         return BNXT_TF_RC_ERROR;
159                 } else if (hdr_info->act_type ==
160                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
161                         /* call the registered callback handler */
162                         if (hdr_info->proto_act_func) {
163                                 if (hdr_info->proto_act_func(action_item,
164                                                              params) !=
165                                     BNXT_TF_RC_SUCCESS) {
166                                         return BNXT_TF_RC_ERROR;
167                                 }
168                         }
169                 }
170                 action_item++;
171         }
172         /* update the implied port details */
173         ulp_rte_parser_implicit_act_port_process(params);
174         return BNXT_TF_RC_SUCCESS;
175 }
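/*
 * Illustrative example: an action list terminated by END is walked the
 * same way, with each supported action dispatched to its registered
 * proto_act_func; the PORT_ID action, for instance, is handled by
 * ulp_rte_port_id_act_handler() below, and any unsupported action aborts
 * the parse.
 */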
176
177 /*
178  * Function to handle the post processing of the computed
179  * fields for the interface.
180  */
181 static void
182 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
183 {
184         uint32_t ifindex;
185         uint16_t port_id, parif;
186         uint32_t mtype;
187         enum bnxt_ulp_direction_type dir;
188
189         /* get the direction details */
190         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
191
192         /* read the port id details */
193         port_id = ULP_COMP_FLD_IDX_RD(params,
194                                       BNXT_ULP_CF_IDX_INCOMING_IF);
195         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
196                                               port_id,
197                                               &ifindex)) {
198                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
199                 return;
200         }
201
202         if (dir == BNXT_ULP_DIR_INGRESS) {
203                 /* Set port PARIF */
204                 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
205                                           BNXT_ULP_PHY_PORT_PARIF, &parif)) {
206                         BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
207                         return;
208                 }
209                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
210                                     parif);
211         } else {
212                 /* Get the match port type */
213                 mtype = ULP_COMP_FLD_IDX_RD(params,
214                                             BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
215                 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
216                         ULP_COMP_FLD_IDX_WR(params,
217                                             BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
218                                             1);
219                         /* Set VF func PARIF */
220                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
221                                                   BNXT_ULP_VF_FUNC_PARIF,
222                                                   &parif)) {
223                                 BNXT_TF_DBG(ERR,
224                                             "ParseErr:ifindex is not valid\n");
225                                 return;
226                         }
227                         ULP_COMP_FLD_IDX_WR(params,
228                                             BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
229                                             parif);
230
231                 } else {
232                         /* Set DRV func PARIF */
233                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
234                                                   BNXT_ULP_DRV_FUNC_PARIF,
235                                                   &parif)) {
236                                 BNXT_TF_DBG(ERR,
237                                             "ParseErr:ifindex is not valid\n");
238                                 return;
239                         }
240                         ULP_COMP_FLD_IDX_WR(params,
241                                             BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
242                                             parif);
243                 }
244                 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
245                         ULP_COMP_FLD_IDX_WR(params,
246                                             BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
247                                             1);
248                 }
249         }
250 }
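/*
 * Summary of the PARIF selection above: ingress flows use the physical
 * port PARIF, while egress flows use the VF function PARIF when the match
 * port is a VF representor and the driver function PARIF otherwise; a PF
 * match port is additionally flagged via BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF.
 */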
251
252 static int32_t
253 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
254 {
255         enum bnxt_ulp_intf_type match_port_type, act_port_type;
256         enum bnxt_ulp_direction_type dir;
257         uint32_t act_port_set;
258
259         /* Get the computed details */
260         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
261         match_port_type = ULP_COMP_FLD_IDX_RD(params,
262                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
263         act_port_type = ULP_COMP_FLD_IDX_RD(params,
264                                             BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
265         act_port_set = ULP_COMP_FLD_IDX_RD(params,
266                                            BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
267
268         /* set the flow direction in the proto and action header */
269         if (dir == BNXT_ULP_DIR_EGRESS) {
270                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
271                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
272                 ULP_BITMAP_SET(params->act_bitmap.bits,
273                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
274         }
275
276         /* calculate the VF to VF flag */
277         if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
278             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
279                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
280
281         /* Update the decrement ttl computational fields */
282         if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
283                              BNXT_ULP_ACT_BIT_DEC_TTL)) {
284                 /*
285                  * If the vxlan proto is included and the vxlan decap
286                  * action is not set, then decrement the tunnel ttl.
287                  * Similarly add GRE and NVGRE in the future.
288                  */
289                 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
290                                       BNXT_ULP_HDR_BIT_T_VXLAN) &&
291                     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
292                                       BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
293                         ULP_COMP_FLD_IDX_WR(params,
294                                             BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
295                 } else {
296                         ULP_COMP_FLD_IDX_WR(params,
297                                             BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
298                 }
299         }
300
301         /* Merge the hdr_fp_bit into the proto header bit */
302         params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
303
304         /* Update the comp fld fid */
305         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
306
307         /* Update the computed interface parameters */
308         bnxt_ulp_comp_fld_intf_update(params);
309
310         /* TBD: Handle the flow rejection scenarios */
311         return 0;
312 }
313
314 /*
315  * Function to handle the post processing of the parsing details
316  */
317 int32_t
318 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
319 {
320         ulp_post_process_normal_flow(params);
321         return ulp_post_process_tun_flow(params);
322 }
323
324 /*
325  * Function to compute the flow direction based on the match port details
326  */
327 static void
328 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
329 {
330         enum bnxt_ulp_intf_type match_port_type;
331
332         /* Get the match port type */
333         match_port_type = ULP_COMP_FLD_IDX_RD(params,
334                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
335
336         /* If ingress flow and match port is a VF rep, then dir is egress */
337         if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
338             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
339                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
340                                     BNXT_ULP_DIR_EGRESS);
341         } else {
342                 /* Assign the input direction */
343                 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
344                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
345                                             BNXT_ULP_DIR_INGRESS);
346                 else
347                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
348                                             BNXT_ULP_DIR_EGRESS);
349         }
350 }
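/*
 * Example: a flow created with the ingress attribute whose match port is
 * a VF representor is recomputed as an egress flow here; in every other
 * case the direction simply follows the ingress/egress flow attribute.
 */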
351
352 /* Utility function to set the SVIF match details for the given port. */
353 static int32_t
354 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
355                         uint32_t ifindex,
356                         uint16_t mask)
357 {
358         uint16_t svif;
359         enum bnxt_ulp_direction_type dir;
360         struct ulp_rte_hdr_field *hdr_field;
361         enum bnxt_ulp_svif_type svif_type;
362         enum bnxt_ulp_intf_type port_type;
363
364         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
365             BNXT_ULP_INVALID_SVIF_VAL) {
366                 BNXT_TF_DBG(ERR,
367                             "SVIF already set, multiple sources not supported\n");
368                 return BNXT_TF_RC_ERROR;
369         }
370
371         /* Get port type details */
372         port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
373         if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
374                 BNXT_TF_DBG(ERR, "Invalid port type\n");
375                 return BNXT_TF_RC_ERROR;
376         }
377
378         /* Update the match port type */
379         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
380
381         /* compute the direction */
382         bnxt_ulp_rte_parser_direction_compute(params);
383
384         /* Get the computed direction */
385         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
386         if (dir == BNXT_ULP_DIR_INGRESS) {
387                 svif_type = BNXT_ULP_PHY_PORT_SVIF;
388         } else {
389                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
390                         svif_type = BNXT_ULP_VF_FUNC_SVIF;
391                 else
392                         svif_type = BNXT_ULP_DRV_FUNC_SVIF;
393         }
394         ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
395                              &svif);
396         svif = rte_cpu_to_be_16(svif);
397         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
398         memcpy(hdr_field->spec, &svif, sizeof(svif));
399         memcpy(hdr_field->mask, &mask, sizeof(mask));
400         hdr_field->size = sizeof(svif);
401         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
402                             rte_be_to_cpu_16(svif));
403         return BNXT_TF_RC_SUCCESS;
404 }
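/*
 * Note: the SVIF selected above depends on the computed direction:
 * ingress flows match on the physical port SVIF, while egress flows match
 * on the VF function SVIF for VF representors and on the driver function
 * SVIF otherwise.  The value is stored big-endian in the header field,
 * whereas the SVIF_FLAG computed field keeps it in CPU byte order.
 */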
405
406 /* Function to handle the parsing of the RTE port id */
407 int32_t
408 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
409 {
410         uint16_t port_id = 0;
411         uint16_t svif_mask = 0xFFFF;
412         uint32_t ifindex;
413         int32_t rc = BNXT_TF_RC_ERROR;
414
415         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
416             BNXT_ULP_INVALID_SVIF_VAL)
417                 return BNXT_TF_RC_SUCCESS;
418
419         /* SVIF not set. So get the port id */
420         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
421
422         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
423                                               port_id,
424                                               &ifindex)) {
425                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
426                 return rc;
427         }
428
429         /* Update the SVIF details */
430         rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
431         return rc;
432 }
433
434 /* Function to handle the implicit action port id */
435 int32_t
436 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
437 {
438         struct rte_flow_action action_item = {0};
439         struct rte_flow_action_port_id port_id = {0};
440
441         /* Read the action port set bit */
442         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
443                 /* Already set, so just exit */
444                 return BNXT_TF_RC_SUCCESS;
445         }
446         port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
447         action_item.conf = &port_id;
448
449         /* Update the action port based on incoming port */
450         ulp_rte_port_id_act_handler(&action_item, params);
451
452         /* Reset the action port set bit */
453         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
454         return BNXT_TF_RC_SUCCESS;
455 }
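/*
 * Note: when no explicit port action was parsed, the code above builds a
 * temporary PORT_ID action pointing at the incoming interface and runs it
 * through ulp_rte_port_id_act_handler(), then clears ACT_PORT_IS_SET
 * again so the destination is not recorded as an explicitly supplied port.
 */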
456
457 /* Function to handle the parsing of RTE Flow item PF Header. */
458 int32_t
459 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
460                        struct ulp_rte_parser_params *params)
461 {
462         uint16_t port_id = 0;
463         uint16_t svif_mask = 0xFFFF;
464         uint32_t ifindex;
465
466         /* Get the implicit port id */
467         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
468
469         /* perform the conversion from dpdk port to bnxt ifindex */
470         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
471                                               port_id,
472                                               &ifindex)) {
473                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
474                 return BNXT_TF_RC_ERROR;
475         }
476
477         /* Update the SVIF details */
478         return  ulp_rte_parser_svif_set(params, ifindex, svif_mask);
479 }
480
481 /* Function to handle the parsing of RTE Flow item VF Header. */
482 int32_t
483 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
484                        struct ulp_rte_parser_params *params)
485 {
486         const struct rte_flow_item_vf *vf_spec = item->spec;
487         const struct rte_flow_item_vf *vf_mask = item->mask;
488         uint16_t mask = 0;
489         uint32_t ifindex;
490         int32_t rc = BNXT_TF_RC_PARSE_ERR;
491
492         /* Get VF rte_flow_item for Port details */
493         if (!vf_spec) {
494                 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
495                 return rc;
496         }
497         if (!vf_mask) {
498                 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
499                 return rc;
500         }
501         mask = vf_mask->id;
502
503         /* perform the conversion from VF Func id to bnxt ifindex */
504         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
505                                                  vf_spec->id,
506                                                  &ifindex)) {
507                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
508                 return rc;
509         }
510         /* Update the SVIF details */
511         return ulp_rte_parser_svif_set(params, ifindex, mask);
512 }
513
514 /* Function to handle the parsing of RTE Flow item port id Header. */
515 int32_t
516 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
517                             struct ulp_rte_parser_params *params)
518 {
519         const struct rte_flow_item_port_id *port_spec = item->spec;
520         const struct rte_flow_item_port_id *port_mask = item->mask;
521         uint16_t mask = 0;
522         int32_t rc = BNXT_TF_RC_PARSE_ERR;
523         uint32_t ifindex;
524
525         if (!port_spec) {
526                 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
527                 return rc;
528         }
529         if (!port_mask) {
530                 BNXT_TF_DBG(ERR, "ParseErr:Port id mask is not valid\n");
531                 return rc;
532         }
533         mask = port_mask->id;
534
535         /* perform the conversion from dpdk port to bnxt ifindex */
536         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
537                                               port_spec->id,
538                                               &ifindex)) {
539                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
540                 return rc;
541         }
542         /* Update the SVIF details */
543         return ulp_rte_parser_svif_set(params, ifindex, mask);
544 }
545
546 /* Function to handle the parsing of RTE Flow item phy port Header. */
547 int32_t
548 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
549                              struct ulp_rte_parser_params *params)
550 {
551         const struct rte_flow_item_phy_port *port_spec = item->spec;
552         const struct rte_flow_item_phy_port *port_mask = item->mask;
553         uint16_t mask = 0;
554         int32_t rc = BNXT_TF_RC_ERROR;
555         uint16_t svif;
556         enum bnxt_ulp_direction_type dir;
557         struct ulp_rte_hdr_field *hdr_field;
558
559         /* Copy the rte_flow_item for phy port into hdr_field */
560         if (!port_spec) {
561                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
562                 return rc;
563         }
564         if (!port_mask) {
565                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
566                 return rc;
567         }
568         mask = port_mask->index;
569
570         /* Update the match port type */
571         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
572                             BNXT_ULP_INTF_TYPE_PHY_PORT);
573
574         /* Compute the Hw direction */
575         bnxt_ulp_rte_parser_direction_compute(params);
576
577         /* Direction validation */
578         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
579         if (dir == BNXT_ULP_DIR_EGRESS) {
580                 BNXT_TF_DBG(ERR,
581                             "Parse Err:Phy ports are valid only for ingress\n");
582                 return BNXT_TF_RC_PARSE_ERR;
583         }
584
585         /* Get the physical port details from port db */
586         rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
587                                            &svif);
588         if (rc) {
589                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
590                 return BNXT_TF_RC_PARSE_ERR;
591         }
592
593         /* Update the SVIF details */
594         svif = rte_cpu_to_be_16(svif);
595         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
596         memcpy(hdr_field->spec, &svif, sizeof(svif));
597         memcpy(hdr_field->mask, &mask, sizeof(mask));
598         hdr_field->size = sizeof(svif);
599         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
600                             rte_be_to_cpu_16(svif));
601         return BNXT_TF_RC_SUCCESS;
602 }
603
604 /* Function to handle the update of proto header based on field values */
605 static void
606 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
607                              uint16_t type, uint32_t in_flag)
608 {
609         if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
610                 if (in_flag) {
611                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
612                                        BNXT_ULP_HDR_BIT_I_IPV4);
613                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
614                 } else {
615                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
616                                        BNXT_ULP_HDR_BIT_O_IPV4);
617                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
618                 }
619         } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
620                 if (in_flag) {
621                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
622                                        BNXT_ULP_HDR_BIT_I_IPV6);
623                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
624                 } else {
625                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
626                                        BNXT_ULP_HDR_BIT_O_IPV6);
627                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
628                 }
629         }
630 }
631
632 /* Internal Function to identify broadcast or multicast packets */
633 static int32_t
634 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
635 {
636         if (rte_is_multicast_ether_addr(eth_addr) ||
637             rte_is_broadcast_ether_addr(eth_addr)) {
638                 BNXT_TF_DBG(DEBUG,
639                             "No support for bcast or mcast addr offload\n");
640                 return 1;
641         }
642         return 0;
643 }
644
645 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
646 int32_t
647 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
648                         struct ulp_rte_parser_params *params)
649 {
650         const struct rte_flow_item_eth *eth_spec = item->spec;
651         const struct rte_flow_item_eth *eth_mask = item->mask;
652         struct ulp_rte_hdr_field *field;
653         uint32_t idx = params->field_idx;
654         uint32_t size;
655         uint16_t eth_type = 0;
656         uint32_t inner_flag = 0;
657
658         /*
659          * Copy the rte_flow_item for eth into hdr_field using ethernet
660          * header fields
661          */
662         if (eth_spec) {
663                 size = sizeof(eth_spec->dst.addr_bytes);
664                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
665                                                 eth_spec->dst.addr_bytes,
666                                                 size);
667                 /* Todo: workaround to avoid multicast and broadcast addresses */
668                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
669                         return BNXT_TF_RC_PARSE_ERR;
670
671                 size = sizeof(eth_spec->src.addr_bytes);
672                 field = ulp_rte_parser_fld_copy(field,
673                                                 eth_spec->src.addr_bytes,
674                                                 size);
675                 /* Todo: workaround to avoid multicast and broadcast addresses */
676                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
677                         return BNXT_TF_RC_PARSE_ERR;
678
679                 field = ulp_rte_parser_fld_copy(field,
680                                                 &eth_spec->type,
681                                                 sizeof(eth_spec->type));
682                 eth_type = eth_spec->type;
683         }
684         if (eth_mask) {
685                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
686                                        sizeof(eth_mask->dst.addr_bytes));
687                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
688                                        sizeof(eth_mask->src.addr_bytes));
689                 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
690                                        sizeof(eth_mask->type));
691         }
692         /* Add number of Eth header elements */
693         params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
694
695         /* Update the protocol hdr bitmap */
696         if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697                              BNXT_ULP_HDR_BIT_O_ETH) ||
698             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699                              BNXT_ULP_HDR_BIT_O_IPV4) ||
700             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
701                              BNXT_ULP_HDR_BIT_O_IPV6) ||
702             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
703                              BNXT_ULP_HDR_BIT_O_UDP) ||
704             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
705                              BNXT_ULP_HDR_BIT_O_TCP)) {
706                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
707                 inner_flag = 1;
708         } else {
709                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
710         }
711         /* Update the field protocol hdr bitmap */
712         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
713
714         return BNXT_TF_RC_SUCCESS;
715 }
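/*
 * Note: the outer/inner classification above is positional: the first ETH
 * item of a pattern sets O_ETH, while an ETH item seen after any outer
 * L2/L3/L4 header (for example beneath a VXLAN tunnel item) is recorded
 * as the inner I_ETH header.
 */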
716
717 /* Function to handle the parsing of RTE Flow item Vlan Header. */
718 int32_t
719 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
720                          struct ulp_rte_parser_params *params)
721 {
722         const struct rte_flow_item_vlan *vlan_spec = item->spec;
723         const struct rte_flow_item_vlan *vlan_mask = item->mask;
724         struct ulp_rte_hdr_field *field;
725         struct ulp_rte_hdr_bitmap       *hdr_bit;
726         uint32_t idx = params->field_idx;
727         uint16_t vlan_tag, priority;
728         uint32_t outer_vtag_num;
729         uint32_t inner_vtag_num;
730         uint16_t eth_type = 0;
731         uint32_t inner_flag = 0;
732
733         /*
734          * Copy the rte_flow_item for vlan into hdr_field using Vlan
735          * header fields
736          */
737         if (vlan_spec) {
738                 vlan_tag = ntohs(vlan_spec->tci);
739                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
740                 vlan_tag &= ULP_VLAN_TAG_MASK;
741                 vlan_tag = htons(vlan_tag);
742
743                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
744                                                 &priority,
745                                                 sizeof(priority));
746                 field = ulp_rte_parser_fld_copy(field,
747                                                 &vlan_tag,
748                                                 sizeof(vlan_tag));
749
750                 field = ulp_rte_parser_fld_copy(field,
751                                                 &vlan_spec->inner_type,
752                                                 sizeof(vlan_spec->inner_type));
753                 eth_type = vlan_spec->inner_type;
754         }
755
756         if (vlan_mask) {
757                 vlan_tag = ntohs(vlan_mask->tci);
758                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
759                 vlan_tag &= 0xfff;
760
761                 /*
762                  * The storage for the priority and VLAN tag is 2 bytes.
763                  * If the 3-bit priority mask is all 1's, then also set
764                  * the remaining 13 bits to 1's so that it is treated
765                  * as an exact match.
766                  */
767                 if (priority == ULP_VLAN_PRIORITY_MASK)
768                         priority |= ~ULP_VLAN_PRIORITY_MASK;
769                 if (vlan_tag == ULP_VLAN_TAG_MASK)
770                         vlan_tag |= ~ULP_VLAN_TAG_MASK;
771                 vlan_tag = htons(vlan_tag);
772
773                 /*
774                  * The priority field is ignored since OVS sets it as a
775                  * wildcard match and that is not supported. This is a
776                  * workaround and shall be addressed in the future.
777                  */
778                 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
779                                          sizeof(priority));
780
781                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
782                                        sizeof(vlan_tag));
783                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
784                                        sizeof(vlan_mask->inner_type));
785         }
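        /*
         * Example: a full TCI mask of 0xffff yields a 12-bit tag mask of
         * 0xfff, which is widened above to a full 16-bit mask so that
         * ulp_rte_parser_field_bitmap_update() treats it as an exact match
         * rather than a wildcard; the priority mask itself is ignored for
         * now as noted above.
         */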
786         /* Set the field index to new incremented value */
787         params->field_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
788
789         /* Get the outer tag and inner tag counts */
790         outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
791                                              BNXT_ULP_CF_IDX_O_VTAG_NUM);
792         inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
793                                              BNXT_ULP_CF_IDX_I_VTAG_NUM);
794
795         /* Update the hdr_bitmap of the vlans */
796         hdr_bit = &params->hdr_bitmap;
797         if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
798             !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
799             !outer_vtag_num) {
800                 /* Update the vlan tag num */
801                 outer_vtag_num++;
802                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
803                                     outer_vtag_num);
804                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
805                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
806                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
807                                BNXT_ULP_HDR_BIT_OO_VLAN);
808         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
809                    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
810                    outer_vtag_num == 1) {
811                 /* update the vlan tag num */
812                 outer_vtag_num++;
813                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
814                                     outer_vtag_num);
815                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
816                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
817                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
818                                BNXT_ULP_HDR_BIT_OI_VLAN);
819         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
820                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
821                    !inner_vtag_num) {
822                 /* update the vlan tag num */
823                 inner_vtag_num++;
824                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
825                                     inner_vtag_num);
826                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
827                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
828                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
829                                BNXT_ULP_HDR_BIT_IO_VLAN);
830                 inner_flag = 1;
831         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
832                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
833                    inner_vtag_num == 1) {
834                 /* update the vlan tag num */
835                 inner_vtag_num++;
836                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
837                                     inner_vtag_num);
838                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
839                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
840                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
841                                BNXT_ULP_HDR_BIT_II_VLAN);
842                 inner_flag = 1;
843         } else {
844                 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
845                 return BNXT_TF_RC_ERROR;
846         }
847         /* Update the field protocol hdr bitmap */
848         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
849         return BNXT_TF_RC_SUCCESS;
850 }
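/*
 * Summary of the VLAN bookkeeping above: the first and second tags seen
 * after the outer ETH header set the OO_VLAN and OI_VLAN header bits, the
 * first and second tags seen after an inner ETH header set IO_VLAN and
 * II_VLAN, and more than two tags per ETH header (or a VLAN item without
 * a preceding ETH item) is rejected.
 */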
851
852 /* Function to handle the update of proto header based on field values */
853 static void
854 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
855                              uint8_t proto, uint32_t in_flag)
856 {
857         if (proto == IPPROTO_UDP) {
858                 if (in_flag) {
859                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
860                                        BNXT_ULP_HDR_BIT_I_UDP);
861                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
862                 } else {
863                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
864                                        BNXT_ULP_HDR_BIT_O_UDP);
865                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
866                 }
867         } else if (proto == IPPROTO_TCP) {
868                 if (in_flag) {
869                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
870                                        BNXT_ULP_HDR_BIT_I_TCP);
871                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
872                 } else {
873                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
874                                        BNXT_ULP_HDR_BIT_O_TCP);
875                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
876                 }
877         } else if (proto == IPPROTO_GRE) {
878                 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
879         } else if (proto == IPPROTO_ICMP) {
880                 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
881                         ULP_BITMAP_SET(param->hdr_bitmap.bits,
882                                        BNXT_ULP_HDR_BIT_I_ICMP);
883                 else
884                         ULP_BITMAP_SET(param->hdr_bitmap.bits,
885                                        BNXT_ULP_HDR_BIT_O_ICMP);
886         }
887         if (proto) {
888                 if (in_flag) {
889                         ULP_COMP_FLD_IDX_WR(param,
890                                             BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
891                                             1);
892                         ULP_COMP_FLD_IDX_WR(param,
893                                             BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
894                                             proto);
895                 } else {
896                         ULP_COMP_FLD_IDX_WR(param,
897                                             BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
898                                             1);
899                         ULP_COMP_FLD_IDX_WR(param,
900                                             BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
901                                             proto);
902                 }
903         }
904 }
905
906 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
907 int32_t
908 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
909                          struct ulp_rte_parser_params *params)
910 {
911         const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
912         const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
913         struct ulp_rte_hdr_field *field;
914         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
915         uint32_t idx = params->field_idx;
916         uint32_t size;
917         uint8_t proto = 0;
918         uint32_t inner_flag = 0;
919         uint32_t cnt;
920
921         /* validate that there is no 3rd L3 header */
922         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
923         if (cnt == 2) {
924                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
925                 return BNXT_TF_RC_ERROR;
926         }
927
928         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
929                               BNXT_ULP_HDR_BIT_O_ETH) &&
930             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
931                               BNXT_ULP_HDR_BIT_I_ETH)) {
932                 /* Since an F2 flow does not include an eth item, when the parser
933                  * detects an IPv4/IPv6 item list that belongs to the outer header,
934                  * i.e., o_ipv4/o_ipv6, check whether O_ETH and I_ETH are set. If
935                  * neither is set, add an offset of sizeof(o_eth/oo_vlan/oi_vlan)
936                  * to the index. This allows the parser post processor to update
937                  * the t_dmac in hdr_field[o_eth.dmac].
938                  */
939                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
940                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
941                 params->field_idx = idx;
942         }
943
944         /*
945          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
946          * header fields
947          */
948         if (ipv4_spec) {
949                 size = sizeof(ipv4_spec->hdr.version_ihl);
950                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
951                                                 &ipv4_spec->hdr.version_ihl,
952                                                 size);
953                 size = sizeof(ipv4_spec->hdr.type_of_service);
954                 field = ulp_rte_parser_fld_copy(field,
955                                                 &ipv4_spec->hdr.type_of_service,
956                                                 size);
957                 size = sizeof(ipv4_spec->hdr.total_length);
958                 field = ulp_rte_parser_fld_copy(field,
959                                                 &ipv4_spec->hdr.total_length,
960                                                 size);
961                 size = sizeof(ipv4_spec->hdr.packet_id);
962                 field = ulp_rte_parser_fld_copy(field,
963                                                 &ipv4_spec->hdr.packet_id,
964                                                 size);
965                 size = sizeof(ipv4_spec->hdr.fragment_offset);
966                 field = ulp_rte_parser_fld_copy(field,
967                                                 &ipv4_spec->hdr.fragment_offset,
968                                                 size);
969                 size = sizeof(ipv4_spec->hdr.time_to_live);
970                 field = ulp_rte_parser_fld_copy(field,
971                                                 &ipv4_spec->hdr.time_to_live,
972                                                 size);
973                 size = sizeof(ipv4_spec->hdr.next_proto_id);
974                 field = ulp_rte_parser_fld_copy(field,
975                                                 &ipv4_spec->hdr.next_proto_id,
976                                                 size);
977                 proto = ipv4_spec->hdr.next_proto_id;
978                 size = sizeof(ipv4_spec->hdr.hdr_checksum);
979                 field = ulp_rte_parser_fld_copy(field,
980                                                 &ipv4_spec->hdr.hdr_checksum,
981                                                 size);
982                 size = sizeof(ipv4_spec->hdr.src_addr);
983                 field = ulp_rte_parser_fld_copy(field,
984                                                 &ipv4_spec->hdr.src_addr,
985                                                 size);
986                 size = sizeof(ipv4_spec->hdr.dst_addr);
987                 field = ulp_rte_parser_fld_copy(field,
988                                                 &ipv4_spec->hdr.dst_addr,
989                                                 size);
990         }
991         if (ipv4_mask) {
992                 ulp_rte_prsr_mask_copy(params, &idx,
993                                        &ipv4_mask->hdr.version_ihl,
994                                        sizeof(ipv4_mask->hdr.version_ihl));
995                 /*
996                  * The tos field is ignored since OVS sets it as a wildcard
997                  * match and that is not supported. This is a workaround and
998                  * shall be addressed in the future.
999                  */
1000                 ulp_rte_prsr_mask_ignore(params, &idx,
1001                                          &ipv4_mask->hdr.type_of_service,
1002                                          sizeof(ipv4_mask->hdr.type_of_service)
1003                                          );
1004
1005                 ulp_rte_prsr_mask_copy(params, &idx,
1006                                        &ipv4_mask->hdr.total_length,
1007                                        sizeof(ipv4_mask->hdr.total_length));
1008                 ulp_rte_prsr_mask_copy(params, &idx,
1009                                        &ipv4_mask->hdr.packet_id,
1010                                        sizeof(ipv4_mask->hdr.packet_id));
1011                 ulp_rte_prsr_mask_copy(params, &idx,
1012                                        &ipv4_mask->hdr.fragment_offset,
1013                                        sizeof(ipv4_mask->hdr.fragment_offset));
1014                 ulp_rte_prsr_mask_copy(params, &idx,
1015                                        &ipv4_mask->hdr.time_to_live,
1016                                        sizeof(ipv4_mask->hdr.time_to_live));
1017                 ulp_rte_prsr_mask_copy(params, &idx,
1018                                        &ipv4_mask->hdr.next_proto_id,
1019                                        sizeof(ipv4_mask->hdr.next_proto_id));
1020                 ulp_rte_prsr_mask_copy(params, &idx,
1021                                        &ipv4_mask->hdr.hdr_checksum,
1022                                        sizeof(ipv4_mask->hdr.hdr_checksum));
1023                 ulp_rte_prsr_mask_copy(params, &idx,
1024                                        &ipv4_mask->hdr.src_addr,
1025                                        sizeof(ipv4_mask->hdr.src_addr));
1026                 ulp_rte_prsr_mask_copy(params, &idx,
1027                                        &ipv4_mask->hdr.dst_addr,
1028                                        sizeof(ipv4_mask->hdr.dst_addr));
1029         }
1030         /* Add the number of ipv4 header elements */
1031         params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
1032
1033         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1034         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1035             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1036                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1037                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1038                 inner_flag = 1;
1039         } else {
1040                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1041                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1042         }
1043
1044         /* Some of the PMD applications may set the protocol field
1045          * in the IPv4 spec but don't set the mask. So, consider
1046          * the mask in the proto value calculation.
1047          */
1048         if (ipv4_mask)
1049                 proto &= ipv4_mask->hdr.next_proto_id;
1050
1051         /* Update the field protocol hdr bitmap */
1052         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1053         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1054         return BNXT_TF_RC_SUCCESS;
1055 }
1056
1057 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1058 int32_t
1059 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1060                          struct ulp_rte_parser_params *params)
1061 {
1062         const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1063         const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1064         struct ulp_rte_hdr_field *field;
1065         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1066         uint32_t idx = params->field_idx;
1067         uint32_t size;
1068         uint32_t vtcf, vtcf_mask;
1069         uint8_t proto = 0;
1070         uint32_t inner_flag = 0;
1071         uint32_t cnt;
1072
1073         /* validate that there is no 3rd L3 header */
1074         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1075         if (cnt == 2) {
1076                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1077                 return BNXT_TF_RC_ERROR;
1078         }
1079
1080         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1081                               BNXT_ULP_HDR_BIT_O_ETH) &&
1082             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1083                               BNXT_ULP_HDR_BIT_I_ETH)) {
1084                 /* Since an F2 flow does not include an eth item, when the parser
1085                  * detects an IPv4/IPv6 item list that belongs to the outer header,
1086                  * i.e., o_ipv4/o_ipv6, check whether O_ETH and I_ETH are set. If
1087                  * neither is set, add an offset of sizeof(o_eth/oo_vlan/oi_vlan)
1088                  * to the index. This allows the parser post processor to update
1089                  * the t_dmac in hdr_field[o_eth.dmac].
1090                  */
1091                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1092                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
1093                 params->field_idx = idx;
1094         }
1095
1096         /*
1097          * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1098          * header fields
1099          */
1100         if (ipv6_spec) {
1101                 size = sizeof(ipv6_spec->hdr.vtc_flow);
1102
1103                 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1104                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1105                                                 &vtcf,
1106                                                 size);
1107
1108                 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1109                 field = ulp_rte_parser_fld_copy(field,
1110                                                 &vtcf,
1111                                                 size);
1112
1113                 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1114                 field = ulp_rte_parser_fld_copy(field,
1115                                                 &vtcf,
1116                                                 size);
1117
1118                 size = sizeof(ipv6_spec->hdr.payload_len);
1119                 field = ulp_rte_parser_fld_copy(field,
1120                                                 &ipv6_spec->hdr.payload_len,
1121                                                 size);
1122                 size = sizeof(ipv6_spec->hdr.proto);
1123                 field = ulp_rte_parser_fld_copy(field,
1124                                                 &ipv6_spec->hdr.proto,
1125                                                 size);
1126                 proto = ipv6_spec->hdr.proto;
1127                 size = sizeof(ipv6_spec->hdr.hop_limits);
1128                 field = ulp_rte_parser_fld_copy(field,
1129                                                 &ipv6_spec->hdr.hop_limits,
1130                                                 size);
1131                 size = sizeof(ipv6_spec->hdr.src_addr);
1132                 field = ulp_rte_parser_fld_copy(field,
1133                                                 &ipv6_spec->hdr.src_addr,
1134                                                 size);
1135                 size = sizeof(ipv6_spec->hdr.dst_addr);
1136                 field = ulp_rte_parser_fld_copy(field,
1137                                                 &ipv6_spec->hdr.dst_addr,
1138                                                 size);
1139         }
1140         if (ipv6_mask) {
1141                 size = sizeof(ipv6_mask->hdr.vtc_flow);
1142
1143                 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1144                 ulp_rte_prsr_mask_copy(params, &idx,
1145                                        &vtcf_mask,
1146                                        size);
1147                 /*
1148                  * The TC and flow label fields are ignored since OVS sets
1149                  * them for match and that is not supported.
1150                  * This is a workaround and shall be addressed
1151                  * in the future.
1152                  */
1153                 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1154                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1155                 vtcf_mask =
1156                         BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1157                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1158
1159                 ulp_rte_prsr_mask_copy(params, &idx,
1160                                        &ipv6_mask->hdr.payload_len,
1161                                        sizeof(ipv6_mask->hdr.payload_len));
1162                 ulp_rte_prsr_mask_copy(params, &idx,
1163                                        &ipv6_mask->hdr.proto,
1164                                        sizeof(ipv6_mask->hdr.proto));
1165                 ulp_rte_prsr_mask_copy(params, &idx,
1166                                        &ipv6_mask->hdr.hop_limits,
1167                                        sizeof(ipv6_mask->hdr.hop_limits));
1168                 ulp_rte_prsr_mask_copy(params, &idx,
1169                                        &ipv6_mask->hdr.src_addr,
1170                                        sizeof(ipv6_mask->hdr.src_addr));
1171                 ulp_rte_prsr_mask_copy(params, &idx,
1172                                        &ipv6_mask->hdr.dst_addr,
1173                                        sizeof(ipv6_mask->hdr.dst_addr));
1174         }
1175         /* add number of ipv6 header elements */
1176         params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1177
1178         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1179         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1180             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1181                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1182                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1183                 inner_flag = 1;
1184         } else {
1185                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1186                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1187         }
1188
1189         /* Some applications may set the protocol field in the IPv6
1190          * spec but not the mask, so factor the mask into the proto
1191          * value calculation.
1192          */
1193         if (ipv6_mask)
1194                 proto &= ipv6_mask->hdr.proto;
1195
1196         /* Update the field protocol hdr bitmap */
1197         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1198         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1199
1200         return BNXT_TF_RC_SUCCESS;
1201 }
1202
1203 /* Function to handle the update of proto header based on field values */
1204 static void
1205 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1206                              uint16_t dst_port)
1207 {
1208         if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
1209                 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1210                                BNXT_ULP_HDR_BIT_T_VXLAN);
1211
1212         if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1213                              BNXT_ULP_HDR_BIT_T_VXLAN) ||
1214             ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1215                              BNXT_ULP_HDR_BIT_T_GRE))
1216                 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1217
1218 }
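
/*
 * Illustrative sketch (assumes the standard rte_flow API): with the default
 * VXLAN UDP port (ULP_UDP_PORT_VXLAN, 4789), an outer UDP item such as
 *
 *     struct rte_flow_item_udp udp_spec = {
 *             .hdr = { .dst_port = RTE_BE16(4789) }
 *     };
 *
 * makes the helper above set BNXT_ULP_HDR_BIT_T_VXLAN in hdr_fp_bit, while
 * BNXT_ULP_CF_IDX_L3_TUN is set only once the header bitmap also carries a
 * T_VXLAN or T_GRE item.
 */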
1219
1220 /* Function to handle the parsing of RTE Flow item UDP Header. */
1221 int32_t
1222 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1223                         struct ulp_rte_parser_params *params)
1224 {
1225         const struct rte_flow_item_udp *udp_spec = item->spec;
1226         const struct rte_flow_item_udp *udp_mask = item->mask;
1227         struct ulp_rte_hdr_field *field;
1228         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1229         uint32_t idx = params->field_idx;
1230         uint32_t size;
1231         uint16_t dport = 0, sport = 0;
1232         uint32_t cnt;
1233
1234         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1235         if (cnt == 2) {
1236                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1237                 return BNXT_TF_RC_ERROR;
1238         }
1239
1240         /*
1241          * Copy the rte_flow_item for UDP into hdr_field using UDP
1242          * header fields
1243          */
1244         if (udp_spec) {
1245                 size = sizeof(udp_spec->hdr.src_port);
1246                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1247                                                 &udp_spec->hdr.src_port,
1248                                                 size);
1249                 sport = udp_spec->hdr.src_port;
1250                 size = sizeof(udp_spec->hdr.dst_port);
1251                 field = ulp_rte_parser_fld_copy(field,
1252                                                 &udp_spec->hdr.dst_port,
1253                                                 size);
1254                 dport = udp_spec->hdr.dst_port;
1255                 size = sizeof(udp_spec->hdr.dgram_len);
1256                 field = ulp_rte_parser_fld_copy(field,
1257                                                 &udp_spec->hdr.dgram_len,
1258                                                 size);
1259                 size = sizeof(udp_spec->hdr.dgram_cksum);
1260                 field = ulp_rte_parser_fld_copy(field,
1261                                                 &udp_spec->hdr.dgram_cksum,
1262                                                 size);
1263         }
1264         if (udp_mask) {
1265                 ulp_rte_prsr_mask_copy(params, &idx,
1266                                        &udp_mask->hdr.src_port,
1267                                        sizeof(udp_mask->hdr.src_port));
1268                 ulp_rte_prsr_mask_copy(params, &idx,
1269                                        &udp_mask->hdr.dst_port,
1270                                        sizeof(udp_mask->hdr.dst_port));
1271                 ulp_rte_prsr_mask_copy(params, &idx,
1272                                        &udp_mask->hdr.dgram_len,
1273                                        sizeof(udp_mask->hdr.dgram_len));
1274                 ulp_rte_prsr_mask_copy(params, &idx,
1275                                        &udp_mask->hdr.dgram_cksum,
1276                                        sizeof(udp_mask->hdr.dgram_cksum));
1277         }
1278
1279         /* Add number of UDP header elements */
1280         params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1281
1282         /* Set the udp header bitmap and computed l4 header bitmaps */
1283         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1284             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1285                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1286                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1287                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1288                                     (uint32_t)rte_be_to_cpu_16(sport));
1289                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1290                                     (uint32_t)rte_be_to_cpu_16(dport));
1291                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1292                                     1);
1293                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1294                                     IPPROTO_UDP);
1295                 if (udp_mask && udp_mask->hdr.src_port)
1296                         ULP_COMP_FLD_IDX_WR(params,
1297                                             BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1298                                             1);
1299                 if (udp_mask && udp_mask->hdr.dst_port)
1300                         ULP_COMP_FLD_IDX_WR(params,
1301                                             BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1302                                             1);
1303         } else {
1304                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1305                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1306                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1307                                     (uint32_t)rte_be_to_cpu_16(sport));
1308                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1309                                     (uint32_t)rte_be_to_cpu_16(dport));
1310                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1311                                     1);
1312                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1313                                     IPPROTO_UDP);
1314                 if (udp_mask && udp_mask->hdr.src_port)
1315                         ULP_COMP_FLD_IDX_WR(params,
1316                                             BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1317                                             1);
1318                 if (udp_mask && udp_mask->hdr.dst_port)
1319                         ULP_COMP_FLD_IDX_WR(params,
1320                                             BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1321                                             1);
1322
1323                 /* Update the field protocol hdr bitmap */
1324                 ulp_rte_l4_proto_type_update(params, dport);
1325         }
1326         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1327         return BNXT_TF_RC_SUCCESS;
1328 }
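
/*
 * Usage sketch (illustrative only): the handler above consumes the usual
 * rte_flow spec/mask pair, e.g.
 *
 *     struct rte_flow_item_udp spec = { .hdr = { .dst_port = RTE_BE16(4789) } };
 *     struct rte_flow_item_udp mask = { .hdr = { .dst_port = RTE_BE16(0xffff) } };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &spec,
 *             .mask = &mask,
 *     };
 *
 * The spec bytes land in hdr_field[] and the mask bytes update the field
 * bitmap via ulp_rte_prsr_mask_copy().
 */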
1329
1330 /* Function to handle the parsing of RTE Flow item TCP Header. */
1331 int32_t
1332 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1333                         struct ulp_rte_parser_params *params)
1334 {
1335         const struct rte_flow_item_tcp *tcp_spec = item->spec;
1336         const struct rte_flow_item_tcp *tcp_mask = item->mask;
1337         struct ulp_rte_hdr_field *field;
1338         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1339         uint32_t idx = params->field_idx;
1340         uint16_t dport = 0, sport = 0;
1341         uint32_t size;
1342         uint32_t cnt;
1343
1344         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1345         if (cnt == 2) {
1346                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1347                 return BNXT_TF_RC_ERROR;
1348         }
1349
1350         /*
1351          * Copy the rte_flow_item for TCP into hdr_field using TCP
1352          * header fields
1353          */
1354         if (tcp_spec) {
1355                 sport = tcp_spec->hdr.src_port;
1356                 size = sizeof(tcp_spec->hdr.src_port);
1357                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1358                                                 &tcp_spec->hdr.src_port,
1359                                                 size);
1360                 dport = tcp_spec->hdr.dst_port;
1361                 size = sizeof(tcp_spec->hdr.dst_port);
1362                 field = ulp_rte_parser_fld_copy(field,
1363                                                 &tcp_spec->hdr.dst_port,
1364                                                 size);
1365                 size = sizeof(tcp_spec->hdr.sent_seq);
1366                 field = ulp_rte_parser_fld_copy(field,
1367                                                 &tcp_spec->hdr.sent_seq,
1368                                                 size);
1369                 size = sizeof(tcp_spec->hdr.recv_ack);
1370                 field = ulp_rte_parser_fld_copy(field,
1371                                                 &tcp_spec->hdr.recv_ack,
1372                                                 size);
1373                 size = sizeof(tcp_spec->hdr.data_off);
1374                 field = ulp_rte_parser_fld_copy(field,
1375                                                 &tcp_spec->hdr.data_off,
1376                                                 size);
1377                 size = sizeof(tcp_spec->hdr.tcp_flags);
1378                 field = ulp_rte_parser_fld_copy(field,
1379                                                 &tcp_spec->hdr.tcp_flags,
1380                                                 size);
1381                 size = sizeof(tcp_spec->hdr.rx_win);
1382                 field = ulp_rte_parser_fld_copy(field,
1383                                                 &tcp_spec->hdr.rx_win,
1384                                                 size);
1385                 size = sizeof(tcp_spec->hdr.cksum);
1386                 field = ulp_rte_parser_fld_copy(field,
1387                                                 &tcp_spec->hdr.cksum,
1388                                                 size);
1389                 size = sizeof(tcp_spec->hdr.tcp_urp);
1390                 field = ulp_rte_parser_fld_copy(field,
1391                                                 &tcp_spec->hdr.tcp_urp,
1392                                                 size);
1393         } else {
1394                 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1395         }
1396
1397         if (tcp_mask) {
1398                 ulp_rte_prsr_mask_copy(params, &idx,
1399                                        &tcp_mask->hdr.src_port,
1400                                        sizeof(tcp_mask->hdr.src_port));
1401                 ulp_rte_prsr_mask_copy(params, &idx,
1402                                        &tcp_mask->hdr.dst_port,
1403                                        sizeof(tcp_mask->hdr.dst_port));
1404                 ulp_rte_prsr_mask_copy(params, &idx,
1405                                        &tcp_mask->hdr.sent_seq,
1406                                        sizeof(tcp_mask->hdr.sent_seq));
1407                 ulp_rte_prsr_mask_copy(params, &idx,
1408                                        &tcp_mask->hdr.recv_ack,
1409                                        sizeof(tcp_mask->hdr.recv_ack));
1410                 ulp_rte_prsr_mask_copy(params, &idx,
1411                                        &tcp_mask->hdr.data_off,
1412                                        sizeof(tcp_mask->hdr.data_off));
1413                 ulp_rte_prsr_mask_copy(params, &idx,
1414                                        &tcp_mask->hdr.tcp_flags,
1415                                        sizeof(tcp_mask->hdr.tcp_flags));
1416                 ulp_rte_prsr_mask_copy(params, &idx,
1417                                        &tcp_mask->hdr.rx_win,
1418                                        sizeof(tcp_mask->hdr.rx_win));
1419                 ulp_rte_prsr_mask_copy(params, &idx,
1420                                        &tcp_mask->hdr.cksum,
1421                                        sizeof(tcp_mask->hdr.cksum));
1422                 ulp_rte_prsr_mask_copy(params, &idx,
1423                                        &tcp_mask->hdr.tcp_urp,
1424                                        sizeof(tcp_mask->hdr.tcp_urp));
1425         }
1426         /* add number of TCP header elements */
1427         params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1428
1429         /* Set the tcp header bitmap and computed l4 header bitmaps */
1430         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1431             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1432                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1433                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1434                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1435                                     (uint32_t)rte_be_to_cpu_16(sport));
1436                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1437                                     (uint32_t)rte_be_to_cpu_16(dport));
1438                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1439                                     1);
1440                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1441                                     IPPROTO_TCP);
1442                 if (tcp_mask && tcp_mask->hdr.src_port)
1443                         ULP_COMP_FLD_IDX_WR(params,
1444                                             BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1445                                             1);
1446                 if (tcp_mask && tcp_mask->hdr.dst_port)
1447                         ULP_COMP_FLD_IDX_WR(params,
1448                                             BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1449                                             1);
1450         } else {
1451                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1452                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1453                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1454                                     (uint32_t)rte_be_to_cpu_16(sport));
1455                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1456                                     (uint32_t)rte_be_to_cpu_16(dport));
1457                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1458                                     1);
1459                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1460                                     IPPROTO_TCP);
1461                 if (tcp_mask && tcp_mask->hdr.src_port)
1462                         ULP_COMP_FLD_IDX_WR(params,
1463                                             BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1464                                             1);
1465                 if (tcp_mask && tcp_mask->hdr.dst_port)
1466                         ULP_COMP_FLD_IDX_WR(params,
1467                                             BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1468                                             1);
1469         }
1470         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1471         return BNXT_TF_RC_SUCCESS;
1472 }
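
/*
 * Note: as with UDP, the first L4 item parsed is treated as the outer
 * header and a second one as the inner header; a third L4 item is rejected
 * above with BNXT_TF_RC_ERROR since only two levels are tracked in
 * BNXT_ULP_CF_IDX_L4_HDR_CNT.
 */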
1473
1474 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1475 int32_t
1476 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1477                           struct ulp_rte_parser_params *params)
1478 {
1479         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1480         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1481         struct ulp_rte_hdr_field *field;
1482         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1483         uint32_t idx = params->field_idx;
1484         uint32_t size;
1485
1486         /*
1487          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1488          * header fields
1489          */
1490         if (vxlan_spec) {
1491                 size = sizeof(vxlan_spec->flags);
1492                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1493                                                 &vxlan_spec->flags,
1494                                                 size);
1495                 size = sizeof(vxlan_spec->rsvd0);
1496                 field = ulp_rte_parser_fld_copy(field,
1497                                                 &vxlan_spec->rsvd0,
1498                                                 size);
1499                 size = sizeof(vxlan_spec->vni);
1500                 field = ulp_rte_parser_fld_copy(field,
1501                                                 &vxlan_spec->vni,
1502                                                 size);
1503                 size = sizeof(vxlan_spec->rsvd1);
1504                 field = ulp_rte_parser_fld_copy(field,
1505                                                 &vxlan_spec->rsvd1,
1506                                                 size);
1507         }
1508         if (vxlan_mask) {
1509                 ulp_rte_prsr_mask_copy(params, &idx,
1510                                        &vxlan_mask->flags,
1511                                        sizeof(vxlan_mask->flags));
1512                 ulp_rte_prsr_mask_copy(params, &idx,
1513                                        &vxlan_mask->rsvd0,
1514                                        sizeof(vxlan_mask->rsvd0));
1515                 ulp_rte_prsr_mask_copy(params, &idx,
1516                                        &vxlan_mask->vni,
1517                                        sizeof(vxlan_mask->vni));
1518                 ulp_rte_prsr_mask_copy(params, &idx,
1519                                        &vxlan_mask->rsvd1,
1520                                        sizeof(vxlan_mask->rsvd1));
1521         }
1522         /* Add number of vxlan header elements */
1523         params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1524
1525         /* Update the hdr_bitmap with vxlan */
1526         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1527         ulp_rte_l4_proto_type_update(params, 0);
1528         return BNXT_TF_RC_SUCCESS;
1529 }
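
/*
 * Illustrative VNI match (assumes the standard rte_flow API): the 24-bit
 * VNI is carried as three network-order bytes, so VNI 123 would be
 * expressed as
 *
 *     struct rte_flow_item_vxlan vxlan = { .vni = { 0x00, 0x00, 0x7b } };
 *
 * with a mask of { 0xff, 0xff, 0xff } for an exact match.
 */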
1530
1531 /* Function to handle the parsing of RTE Flow item GRE Header. */
1532 int32_t
1533 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1534                           struct ulp_rte_parser_params *params)
1535 {
1536         const struct rte_flow_item_gre *gre_spec = item->spec;
1537         const struct rte_flow_item_gre *gre_mask = item->mask;
1538         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1539         uint32_t idx = params->field_idx;
1540         uint32_t size;
1541         struct ulp_rte_hdr_field *field;
1542
1543         if (gre_spec) {
1544                 size = sizeof(gre_spec->c_rsvd0_ver);
1545                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1546                                                 &gre_spec->c_rsvd0_ver,
1547                                                 size);
1548                 size = sizeof(gre_spec->protocol);
1549                 field = ulp_rte_parser_fld_copy(field,
1550                                                 &gre_spec->protocol,
1551                                                 size);
1552         }
1553         if (gre_mask) {
1554                 ulp_rte_prsr_mask_copy(params, &idx,
1555                                        &gre_mask->c_rsvd0_ver,
1556                                        sizeof(gre_mask->c_rsvd0_ver));
1557                 ulp_rte_prsr_mask_copy(params, &idx,
1558                                        &gre_mask->protocol,
1559                                        sizeof(gre_mask->protocol));
1560         }
1561         /* Add number of GRE header elements */
1562         params->field_idx += BNXT_ULP_PROTO_HDR_GRE_NUM;
1563
1564         /* Update the hdr_bitmap with GRE */
1565         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1566         ulp_rte_l4_proto_type_update(params, 0);
1567         return BNXT_TF_RC_SUCCESS;
1568 }
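
/*
 * Note: the call above passes dst_port 0, so it only re-evaluates the
 * tunnel state; with BNXT_ULP_HDR_BIT_T_GRE now set in the header bitmap
 * it marks the flow as tunneled via BNXT_ULP_CF_IDX_L3_TUN.
 */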
1569
1570 /* Function to handle the parsing of RTE Flow item ANY. */
1571 int32_t
1572 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1573                          struct ulp_rte_parser_params *params __rte_unused)
1574 {
1575         return BNXT_TF_RC_SUCCESS;
1576 }
1577
1578 /* Function to handle the parsing of RTE Flow item ICMP Header. */
1579 int32_t
1580 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1581                          struct ulp_rte_parser_params *params)
1582 {
1583         const struct rte_flow_item_icmp *icmp_spec = item->spec;
1584         const struct rte_flow_item_icmp *icmp_mask = item->mask;
1585         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1586         uint32_t idx = params->field_idx;
1587         uint32_t size;
1588         struct ulp_rte_hdr_field *field;
1589
1590         if (icmp_spec) {
1591                 size = sizeof(icmp_spec->hdr.icmp_type);
1592                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1593                                                 &icmp_spec->hdr.icmp_type,
1594                                                 size);
1595                 size = sizeof(icmp_spec->hdr.icmp_code);
1596                 field = ulp_rte_parser_fld_copy(field,
1597                                                 &icmp_spec->hdr.icmp_code,
1598                                                 size);
1599                 size = sizeof(icmp_spec->hdr.icmp_cksum);
1600                 field = ulp_rte_parser_fld_copy(field,
1601                                                 &icmp_spec->hdr.icmp_cksum,
1602                                                 size);
1603                 size = sizeof(icmp_spec->hdr.icmp_ident);
1604                 field = ulp_rte_parser_fld_copy(field,
1605                                                 &icmp_spec->hdr.icmp_ident,
1606                                                 size);
1607                 size = sizeof(icmp_spec->hdr.icmp_seq_nb);
1608                 field = ulp_rte_parser_fld_copy(field,
1609                                                 &icmp_spec->hdr.icmp_seq_nb,
1610                                                 size);
1611         }
1612         if (icmp_mask) {
1613                 ulp_rte_prsr_mask_copy(params, &idx,
1614                                        &icmp_mask->hdr.icmp_type,
1615                                        sizeof(icmp_mask->hdr.icmp_type));
1616                 ulp_rte_prsr_mask_copy(params, &idx,
1617                                        &icmp_mask->hdr.icmp_code,
1618                                        sizeof(icmp_mask->hdr.icmp_code));
1619                 ulp_rte_prsr_mask_copy(params, &idx,
1620                                        &icmp_mask->hdr.icmp_cksum,
1621                                        sizeof(icmp_mask->hdr.icmp_cksum));
1622                 ulp_rte_prsr_mask_copy(params, &idx,
1623                                        &icmp_mask->hdr.icmp_ident,
1624                                        sizeof(icmp_mask->hdr.icmp_ident));
1625                 ulp_rte_prsr_mask_copy(params, &idx,
1626                                        &icmp_mask->hdr.icmp_seq_nb,
1627                                        sizeof(icmp_mask->hdr.icmp_seq_nb));
1628         }
1629         /* Add number of ICMP header elements */
1630         params->field_idx += BNXT_ULP_PROTO_HDR_ICMP_NUM;
1631
1632         /* Update the hdr_bitmap with ICMP */
1633         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1634                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1635         else
1636                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1637         return BNXT_TF_RC_SUCCESS;
1638 }
1639
1640 /* Function to handle the parsing of RTE Flow item void Header */
1641 int32_t
1642 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1643                          struct ulp_rte_parser_params *params __rte_unused)
1644 {
1645         return BNXT_TF_RC_SUCCESS;
1646 }
1647
1648 /* Function to handle the parsing of RTE Flow action void Header. */
1649 int32_t
1650 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1651                          struct ulp_rte_parser_params *params __rte_unused)
1652 {
1653         return BNXT_TF_RC_SUCCESS;
1654 }
1655
1656 /* Function to handle the parsing of RTE Flow action Mark Header. */
1657 int32_t
1658 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1659                          struct ulp_rte_parser_params *param)
1660 {
1661         const struct rte_flow_action_mark *mark;
1662         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1663         uint32_t mark_id;
1664
1665         mark = action_item->conf;
1666         if (mark) {
1667                 mark_id = tfp_cpu_to_be_32(mark->id);
1668                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1669                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1670
1671                 /* Update the act_bitmap with mark */
1672                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1673                 return BNXT_TF_RC_SUCCESS;
1674         }
1675         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1676         return BNXT_TF_RC_ERROR;
1677 }
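
/*
 * Usage sketch (illustrative only): a MARK action consumed by the handler
 * above would typically look like
 *
 *     struct rte_flow_action_mark mark = { .id = 0x1234 };
 *     struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_MARK,
 *             .conf = &mark,
 *     };
 *
 * The id is stored big-endian in the MARK action property.
 */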
1678
1679 /* Function to handle the parsing of RTE Flow action RSS Header. */
1680 int32_t
1681 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1682                         struct ulp_rte_parser_params *param)
1683 {
1684         const struct rte_flow_action_rss *rss = action_item->conf;
1685
1686         if (rss) {
1687                 /* Update the act_bitmap with RSS */
1688                 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1689                 return BNXT_TF_RC_SUCCESS;
1690         }
1691         BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1692         return BNXT_TF_RC_ERROR;
1693 }
1694
1695 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1696 int32_t
1697 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1698                                 struct ulp_rte_parser_params *params)
1699 {
1700         const struct rte_flow_action_vxlan_encap *vxlan_encap;
1701         const struct rte_flow_item *item;
1702         const struct rte_flow_item_eth *eth_spec;
1703         const struct rte_flow_item_ipv4 *ipv4_spec;
1704         const struct rte_flow_item_ipv6 *ipv6_spec;
1705         struct rte_flow_item_vxlan vxlan_spec;
1706         uint32_t vlan_num = 0, vlan_size = 0;
1707         uint32_t ip_size = 0, ip_type = 0;
1708         uint32_t vxlan_size = 0;
1709         uint8_t *buff;
1710         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1711         const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1712                                     0x00, 0x40, 0x11};
1713         /* IPv6 header per byte - vtc_flow(4), payload_len(2), proto, hop_limit */
1714         const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1715                                 0x00, 0x11, 0xf6};
1716         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1717         struct ulp_rte_act_prop *ap = &params->act_prop;
1718         const uint8_t *tmp_buff;
1719
1720         vxlan_encap = action_item->conf;
1721         if (!vxlan_encap) {
1722                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1723                 return BNXT_TF_RC_ERROR;
1724         }
1725
1726         item = vxlan_encap->definition;
1727         if (!item) {
1728                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1729                 return BNXT_TF_RC_ERROR;
1730         }
1731
1732         if (!ulp_rte_item_skip_void(&item, 0))
1733                 return BNXT_TF_RC_ERROR;
1734
1735         /* must have ethernet header */
1736         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1737                 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1738                 return BNXT_TF_RC_ERROR;
1739         }
1740         eth_spec = item->spec;
1741         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1742         ulp_encap_buffer_copy(buff,
1743                               eth_spec->dst.addr_bytes,
1744                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1745                               ULP_BUFFER_ALIGN_8_BYTE);
1746
1747         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1748         ulp_encap_buffer_copy(buff,
1749                               eth_spec->src.addr_bytes,
1750                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1751                               ULP_BUFFER_ALIGN_8_BYTE);
1752
1753         /* Goto the next item */
1754         if (!ulp_rte_item_skip_void(&item, 1))
1755                 return BNXT_TF_RC_ERROR;
1756
1757         /* May have vlan header */
1758         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1759                 vlan_num++;
1760                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1761                 ulp_encap_buffer_copy(buff,
1762                                       item->spec,
1763                                       sizeof(struct rte_flow_item_vlan),
1764                                       ULP_BUFFER_ALIGN_8_BYTE);
1765
1766                 if (!ulp_rte_item_skip_void(&item, 1))
1767                         return BNXT_TF_RC_ERROR;
1768         }
1769
1770         /* may have two vlan headers */
1771         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1772                 vlan_num++;
1773                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1774                        sizeof(struct rte_flow_item_vlan)],
1775                        item->spec,
1776                        sizeof(struct rte_flow_item_vlan));
1777                 if (!ulp_rte_item_skip_void(&item, 1))
1778                         return BNXT_TF_RC_ERROR;
1779         }
1780         /* Update the vlan count and size if vlan headers are present */
1781         if (vlan_num) {
1782                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1783                 vlan_num = tfp_cpu_to_be_32(vlan_num);
1784                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1785                        &vlan_num,
1786                        sizeof(uint32_t));
1787                 vlan_size = tfp_cpu_to_be_32(vlan_size);
1788                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1789                        &vlan_size,
1790                        sizeof(uint32_t));
1791         }
1792
1793         /* L3 must be IPv4, IPv6 */
1794         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1795                 ipv4_spec = item->spec;
1796                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1797
1798                 /* copy the ipv4 details */
1799                 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1800                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1801                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1802                         ulp_encap_buffer_copy(buff,
1803                                               def_ipv4_hdr,
1804                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1805                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1806                                               ULP_BUFFER_ALIGN_8_BYTE);
1807                 } else {
1808                         /* The total length field is ignored in the ip hdr. */
1809                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1810                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1811                         ulp_encap_buffer_copy(buff,
1812                                               tmp_buff,
1813                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1814                                               ULP_BUFFER_ALIGN_8_BYTE);
1815                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1816                              BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1817                         ulp_encap_buffer_copy(buff,
1818                                               &ipv4_spec->hdr.version_ihl,
1819                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1820                                               ULP_BUFFER_ALIGN_8_BYTE);
1821                 }
1822
1823                 /* Update the dst ip address in ip encap buffer */
1824                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1825                     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1826                     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1827                 ulp_encap_buffer_copy(buff,
1828                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1829                                       sizeof(ipv4_spec->hdr.dst_addr),
1830                                       ULP_BUFFER_ALIGN_8_BYTE);
1831
1832                 /* Update the src ip address */
1833                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1834                         BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1835                         sizeof(ipv4_spec->hdr.src_addr)];
1836                 ulp_encap_buffer_copy(buff,
1837                                       (const uint8_t *)&ipv4_spec->hdr.src_addr,
1838                                       sizeof(ipv4_spec->hdr.src_addr),
1839                                       ULP_BUFFER_ALIGN_8_BYTE);
1840
1841                 /* Update the ip size details */
1842                 ip_size = tfp_cpu_to_be_32(ip_size);
1843                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1844                        &ip_size, sizeof(uint32_t));
1845
1846                 /* update the ip type */
1847                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1848                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1849                        &ip_type, sizeof(uint32_t));
1850
1851                 /* update the computed field to notify it is ipv4 header */
1852                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1853                                     1);
1854
1855                 if (!ulp_rte_item_skip_void(&item, 1))
1856                         return BNXT_TF_RC_ERROR;
1857         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1858                 ipv6_spec = item->spec;
1859                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1860
1861                 /* copy the ipv6 details */
1862                 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1863                 if (ulp_buffer_is_empty(tmp_buff,
1864                                         BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1865                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1866                         ulp_encap_buffer_copy(buff,
1867                                               def_ipv6_hdr,
1868                                               sizeof(def_ipv6_hdr),
1869                                               ULP_BUFFER_ALIGN_8_BYTE);
1870                 } else {
1871                         /* The payload length field is ignored in the ip hdr. */
1872                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1873                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1874                         ulp_encap_buffer_copy(buff,
1875                                               tmp_buff,
1876                                               BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1877                                               ULP_BUFFER_ALIGN_8_BYTE);
1878                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1879                                 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1880                                 BNXT_ULP_ENCAP_IPV6_DO];
1881                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1882                         ulp_encap_buffer_copy(buff,
1883                                               tmp_buff,
1884                                               BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1885                                               ULP_BUFFER_ALIGN_8_BYTE);
1886                 }
1887                 /* Update the dst ip address in ip encap buffer */
1888                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1889                         sizeof(def_ipv6_hdr)];
1890                 ulp_encap_buffer_copy(buff,
1891                                       (const uint8_t *)ipv6_spec->hdr.dst_addr,
1892                                       sizeof(ipv6_spec->hdr.dst_addr),
1893                                       ULP_BUFFER_ALIGN_8_BYTE);
1894
1895                 /* Update the src ip address */
1896                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1897                 ulp_encap_buffer_copy(buff,
1898                                       (const uint8_t *)ipv6_spec->hdr.src_addr,
1899                                       sizeof(ipv6_spec->hdr.src_addr),
1900                                       ULP_BUFFER_ALIGN_16_BYTE);
1901
1902                 /* Update the ip size details */
1903                 ip_size = tfp_cpu_to_be_32(ip_size);
1904                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1905                        &ip_size, sizeof(uint32_t));
1906
1907                 /* update the ip type */
1908                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1909                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1910                        &ip_type, sizeof(uint32_t));
1911
1912                 /* update the computed field to notify it is ipv6 header */
1913                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1914                                     1);
1915
1916                 if (!ulp_rte_item_skip_void(&item, 1))
1917                         return BNXT_TF_RC_ERROR;
1918         } else {
1919                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1920                 return BNXT_TF_RC_ERROR;
1921         }
1922
1923         /* L4 is UDP */
1924         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1925                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1926                 return BNXT_TF_RC_ERROR;
1927         }
1928         /* copy the udp details */
1929         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1930                               item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1931                               ULP_BUFFER_ALIGN_8_BYTE);
1932
1933         if (!ulp_rte_item_skip_void(&item, 1))
1934                 return BNXT_TF_RC_ERROR;
1935
1936         /* Finally VXLAN */
1937         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1938                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1939                 return BNXT_TF_RC_ERROR;
1940         }
1941         vxlan_size = sizeof(struct rte_flow_item_vxlan);
1942         /* copy the vxlan details */
1943         memcpy(&vxlan_spec, item->spec, vxlan_size);
1944         vxlan_spec.flags = 0x08;
1945         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1946         if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1947                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1948                                       vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1949         } else {
1950                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1951                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1952                 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1953                                       (const uint8_t *)&vxlan_spec.vni,
1954                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1955         }
1956         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1957         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1958                &vxlan_size, sizeof(uint32_t));
1959
1960         /* Update the act_bitmap with vxlan encap */
1961         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1962         return BNXT_TF_RC_SUCCESS;
1963 }
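
/*
 * Note: the encap definition parsed above must follow the item order
 * ETH [/ VLAN [/ VLAN]] / IPV4 or IPV6 / UDP / VXLAN, with VOID items
 * allowed in between; any other layout is rejected with BNXT_TF_RC_ERROR.
 */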
1964
1965 /* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
1966 int32_t
1967 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1968                                 __rte_unused,
1969                                 struct ulp_rte_parser_params *params)
1970 {
1971         /* Update the act_bitmap with vxlan decap */
1972         ULP_BITMAP_SET(params->act_bitmap.bits,
1973                        BNXT_ULP_ACT_BIT_VXLAN_DECAP);
1974         /* Update computational field with tunnel decap info */
1975         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1976         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1977         return BNXT_TF_RC_SUCCESS;
1978 }
1979
1980 /* Function to handle the parsing of RTE Flow action drop Header. */
1981 int32_t
1982 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1983                          struct ulp_rte_parser_params *params)
1984 {
1985         /* Update the act_bitmap with drop */
1986         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
1987         return BNXT_TF_RC_SUCCESS;
1988 }
1989
1990 /* Function to handle the parsing of RTE Flow action count. */
1991 int32_t
1992 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1993                           struct ulp_rte_parser_params *params)
1994 {
1995         const struct rte_flow_action_count *act_count;
1996         struct ulp_rte_act_prop *act_prop = &params->act_prop;
1997
1998         act_count = action_item->conf;
1999         if (act_count) {
2000                 if (act_count->shared) {
2001                         BNXT_TF_DBG(ERR,
2002                                     "Parse Error:Shared count not supported\n");
2003                         return BNXT_TF_RC_PARSE_ERR;
2004                 }
2005                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2006                        &act_count->id,
2007                        BNXT_ULP_ACT_PROP_SZ_COUNT);
2008         }
2009
2010         /* Update the act_bitmap with count */
2011         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2012         return BNXT_TF_RC_SUCCESS;
2013 }
2014
2015 /* Function to handle the parsing of action ports. */
2016 static int32_t
2017 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2018                             uint32_t ifindex)
2019 {
2020         enum bnxt_ulp_direction_type dir;
2021         uint16_t pid_s;
2022         uint32_t pid;
2023         struct ulp_rte_act_prop *act = &param->act_prop;
2024         enum bnxt_ulp_intf_type port_type;
2025         uint32_t vnic_type;
2026
2027         /* Get the direction */
2028         dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
2029         if (dir == BNXT_ULP_DIR_EGRESS) {
2030                 /* For egress direction, fill vport */
2031                 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2032                         return BNXT_TF_RC_ERROR;
2033
2034                 pid = pid_s;
2035                 pid = rte_cpu_to_be_32(pid);
2036                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2037                        &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2038         } else {
2039                 /* For ingress direction, fill vnic */
2040                 port_type = ULP_COMP_FLD_IDX_RD(param,
2041                                                 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2042                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2043                         vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2044                 else
2045                         vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2046
2047                 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2048                                                  vnic_type, &pid_s))
2049                         return BNXT_TF_RC_ERROR;
2050
2051                 pid = pid_s;
2052                 pid = rte_cpu_to_be_32(pid);
2053                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2054                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2055         }
2056
2057         /* Update the action port set bit */
2058         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2059         return BNXT_TF_RC_SUCCESS;
2060 }
2061
2062 /* Function to handle the parsing of RTE Flow action PF. */
2063 int32_t
2064 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2065                        struct ulp_rte_parser_params *params)
2066 {
2067         uint32_t port_id;
2068         uint32_t ifindex;
2069         enum bnxt_ulp_intf_type intf_type;
2070
2071         /* Get the port id of the current device */
2072         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2073
2074         /* Get the port db ifindex */
2075         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2076                                               &ifindex)) {
2077                 BNXT_TF_DBG(ERR, "Invalid port id\n");
2078                 return BNXT_TF_RC_ERROR;
2079         }
2080
2081         /* Check the port is PF port */
2082         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2083         if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2084                 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2085                 return BNXT_TF_RC_ERROR;
2086         }
2087         /* Update the action properties */
2088         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2089         return ulp_rte_parser_act_port_set(params, ifindex);
2090 }
2091
2092 /* Function to handle the parsing of RTE Flow action VF. */
2093 int32_t
2094 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2095                        struct ulp_rte_parser_params *params)
2096 {
2097         const struct rte_flow_action_vf *vf_action;
2098         enum bnxt_ulp_intf_type intf_type;
2099         uint32_t ifindex;
2100         struct bnxt *bp;
2101
2102         vf_action = action_item->conf;
2103         if (!vf_action) {
2104                 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2105                 return BNXT_TF_RC_PARSE_ERR;
2106         }
2107
2108         if (vf_action->original) {
2109                 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2110                 return BNXT_TF_RC_PARSE_ERR;
2111         }
2112
2113         bp = bnxt_get_bp(params->port_id);
2114         if (bp == NULL) {
2115                 BNXT_TF_DBG(ERR, "Invalid bp\n");
2116                 return BNXT_TF_RC_ERROR;
2117         }
2118
2119         /* vf_action->id is a logical number which in this case is an
2120          * offset from the first VF. So, to get the absolute VF id, the
2121          * offset must be added to the absolute first vf id of that port.
2122          */
2123         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2124                                                  bp->first_vf_id + vf_action->id,
2125                                                  &ifindex)) {
2126                 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2127                 return BNXT_TF_RC_ERROR;
2128         }
2129         /* Check the port is VF port */
2130         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2131         if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2132             intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2133                 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2134                 return BNXT_TF_RC_ERROR;
2135         }
2136
2137         /* Update the action properties */
2138         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2139         return ulp_rte_parser_act_port_set(params, ifindex);
2140 }
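
/*
 * For example, if the port's first_vf_id is 64 and the application passes
 * RTE_FLOW_ACTION_TYPE_VF with id 2, the function id resolved through the
 * port database above is 64 + 2 = 66.
 */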
2141
2142 /* Function to handle the parsing of RTE Flow action port_id. */
2143 int32_t
2144 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2145                             struct ulp_rte_parser_params *param)
2146 {
2147         const struct rte_flow_action_port_id *port_id = act_item->conf;
2148         uint32_t ifindex;
2149         enum bnxt_ulp_intf_type intf_type;
2150
2151         if (!port_id) {
2152                 BNXT_TF_DBG(ERR,
2153                             "ParseErr: Invalid Argument\n");
2154                 return BNXT_TF_RC_PARSE_ERR;
2155         }
2156         if (port_id->original) {
2157                 BNXT_TF_DBG(ERR,
2158                             "ParseErr:Portid Original not supported\n");
2159                 return BNXT_TF_RC_PARSE_ERR;
2160         }
2161
2162         /* Get the port db ifindex */
2163         if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2164                                               &ifindex)) {
2165                 BNXT_TF_DBG(ERR, "Invalid port id\n");
2166                 return BNXT_TF_RC_ERROR;
2167         }
2168
2169         /* Get the intf type */
2170         intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2171         if (!intf_type) {
2172                 BNXT_TF_DBG(ERR, "Invalid port type\n");
2173                 return BNXT_TF_RC_ERROR;
2174         }
2175
2176         /* Set the action port */
2177         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2178         return ulp_rte_parser_act_port_set(param, ifindex);
2179 }
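
/*
 * Usage sketch (illustrative only): redirecting to DPDK port 1 would be
 * expressed as
 *
 *     struct rte_flow_action_port_id pid = { .id = 1 };
 *     struct rte_flow_action act = {
 *             .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *             .conf = &pid,
 *     };
 *
 * The handler above maps the DPDK port id to a port database ifindex and
 * programs either a vport (egress) or a VNIC (ingress).
 */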
2180
2181 /* Function to handle the parsing of RTE Flow action phy_port. */
2182 int32_t
2183 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2184                              struct ulp_rte_parser_params *prm)
2185 {
2186         const struct rte_flow_action_phy_port *phy_port;
2187         uint32_t pid;
2188         int32_t rc;
2189         uint16_t pid_s;
2190         enum bnxt_ulp_direction_type dir;
2191
2192         phy_port = action_item->conf;
2193         if (!phy_port) {
2194                 BNXT_TF_DBG(ERR,
2195                             "ParseErr: Invalid Argument\n");
2196                 return BNXT_TF_RC_PARSE_ERR;
2197         }
2198
2199         if (phy_port->original) {
2200                 BNXT_TF_DBG(ERR,
2201                             "Parse Err:Port Original not supported\n");
2202                 return BNXT_TF_RC_PARSE_ERR;
2203         }
2204         dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2205         if (dir != BNXT_ULP_DIR_EGRESS) {
2206                 BNXT_TF_DBG(ERR,
2207                             "Parse Err:Phy ports are valid only for egress\n");
2208                 return BNXT_TF_RC_PARSE_ERR;
2209         }
2210         /* Get the physical port details from port db */
2211         rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2212                                             &pid_s);
2213         if (rc) {
2214                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2215                 return -EINVAL;
2216         }
2217
2218         pid = pid_s;
2219         pid = rte_cpu_to_be_32(pid);
2220         memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2221                &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2222
2223         /* Update the action port set bit */
2224         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2225         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2226                             BNXT_ULP_INTF_TYPE_PHY_PORT);
2227         return BNXT_TF_RC_SUCCESS;
2228 }
2229
2230 /* Function to handle the parsing of RTE Flow action pop vlan. */
2231 int32_t
2232 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2233                                 struct ulp_rte_parser_params *params)
2234 {
2235         /* Update the act_bitmap with pop */
2236         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2237         return BNXT_TF_RC_SUCCESS;
2238 }
2239
2240 /* Function to handle the parsing of RTE Flow action push vlan. */
2241 int32_t
2242 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2243                                  struct ulp_rte_parser_params *params)
2244 {
2245         const struct rte_flow_action_of_push_vlan *push_vlan;
2246         uint16_t ethertype;
2247         struct ulp_rte_act_prop *act = &params->act_prop;
2248
2249         push_vlan = action_item->conf;
2250         if (push_vlan) {
2251                 ethertype = push_vlan->ethertype;
2252                 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2253                         BNXT_TF_DBG(ERR,
2254                                     "Parse Err: Ethertype not supported\n");
2255                         return BNXT_TF_RC_PARSE_ERR;
2256                 }
2257                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2258                        &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2259                 /* Update the act_bitmap with push vlan */
2260                 ULP_BITMAP_SET(params->act_bitmap.bits,
2261                                BNXT_ULP_ACT_BIT_PUSH_VLAN);
2262                 return BNXT_TF_RC_SUCCESS;
2263         }
2264         BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2265         return BNXT_TF_RC_ERROR;
2266 }
2267
2268 /* Function to handle the parsing of RTE Flow action set vlan id. */
2269 int32_t
2270 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2271                                     struct ulp_rte_parser_params *params)
2272 {
2273         const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2274         uint32_t vid;
2275         struct ulp_rte_act_prop *act = &params->act_prop;
2276
2277         vlan_vid = action_item->conf;
2278         if (vlan_vid && vlan_vid->vlan_vid) {
2279                 vid = vlan_vid->vlan_vid;
2280                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2281                        &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2282                 /* Update the act_bitmap with set vlan vid */
2283                 ULP_BITMAP_SET(params->act_bitmap.bits,
2284                                BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2285                 return BNXT_TF_RC_SUCCESS;
2286         }
2287         BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2288         return BNXT_TF_RC_ERROR;
2289 }
2290
2291 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2292 int32_t
2293 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2294                                     struct ulp_rte_parser_params *params)
2295 {
2296         const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2297         uint8_t pcp;
2298         struct ulp_rte_act_prop *act = &params->act_prop;
2299
2300         vlan_pcp = action_item->conf;
2301         if (vlan_pcp) {
2302                 pcp = vlan_pcp->vlan_pcp;
2303                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2304                        &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2305                 /* Update the act_bitmap with set vlan pcp */
2306                 ULP_BITMAP_SET(params->act_bitmap.bits,
2307                                BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2308                 return BNXT_TF_RC_SUCCESS;
2309         }
2310         BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2311         return BNXT_TF_RC_ERROR;
2312 }
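/*
 * Illustrative sketch (assumption): PCP is a plain 3-bit priority value, so
 * no byte swapping is needed. It is typically combined with the push-vlan and
 * set-vlan-vid actions above.
 *
 *	struct rte_flow_action_of_set_vlan_pcp set_pcp = {
 *		.vlan_pcp = 3,
 *	};
 *	struct rte_flow_action pcp_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
 *		.conf = &set_pcp,
 *	};
 */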
2313
2314 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2315 int32_t
2316 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2317                                  struct ulp_rte_parser_params *params)
2318 {
2319         const struct rte_flow_action_set_ipv4 *set_ipv4;
2320         struct ulp_rte_act_prop *act = &params->act_prop;
2321
2322         set_ipv4 = action_item->conf;
2323         if (set_ipv4) {
2324                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2325                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2326                 /* Update the act_bitmap with set ipv4 src */
2327                 ULP_BITMAP_SET(params->act_bitmap.bits,
2328                                BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2329                 return BNXT_TF_RC_SUCCESS;
2330         }
2331         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2332         return BNXT_TF_RC_ERROR;
2333 }
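/*
 * Illustrative sketch (assumption): a source-NAT conf as an application would
 * pass it; the address is already big-endian when it reaches this handler.
 *
 *	struct rte_flow_action_set_ipv4 snat = {
 *		.ipv4_addr = rte_cpu_to_be_32(RTE_IPV4(10, 0, 0, 1)),
 *	};
 *	struct rte_flow_action snat_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
 *		.conf = &snat,
 *	};
 */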
2334
2335 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2336 int32_t
2337 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2338                                  struct ulp_rte_parser_params *params)
2339 {
2340         const struct rte_flow_action_set_ipv4 *set_ipv4;
2341         struct ulp_rte_act_prop *act = &params->act_prop;
2342
2343         set_ipv4 = action_item->conf;
2344         if (set_ipv4) {
2345                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2346                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2347                 /* Update the act_bitmap with set ipv4 dst */
2348                 ULP_BITMAP_SET(params->act_bitmap.bits,
2349                                BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2350                 return BNXT_TF_RC_SUCCESS;
2351         }
2352         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2353         return BNXT_TF_RC_ERROR;
2354 }
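/*
 * Illustrative sketch (assumption): destination-NAT mirrors the source case,
 * only the action type differs.
 *
 *	struct rte_flow_action_set_ipv4 dnat = {
 *		.ipv4_addr = rte_cpu_to_be_32(RTE_IPV4(10, 0, 0, 2)),
 *	};
 *	struct rte_flow_action dnat_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST,
 *		.conf = &dnat,
 *	};
 */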
2355
2356 /* Function to handle the parsing of RTE Flow action set tp src.*/
2357 int32_t
2358 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2359                                struct ulp_rte_parser_params *params)
2360 {
2361         const struct rte_flow_action_set_tp *set_tp;
2362         struct ulp_rte_act_prop *act = &params->act_prop;
2363
2364         set_tp = action_item->conf;
2365         if (set_tp) {
2366                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2367                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2368                 /* Update the act_bitmap with set tp src */
2369                 ULP_BITMAP_SET(params->act_bitmap.bits,
2370                                BNXT_ULP_ACT_BIT_SET_TP_SRC);
2371                 return BNXT_TF_RC_SUCCESS;
2372         }
2373
2374         BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2375         return BNXT_TF_RC_ERROR;
2376 }
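/*
 * Illustrative sketch (assumption): L4 source-port rewrite; the port is
 * carried in network byte order.
 *
 *	struct rte_flow_action_set_tp tp_src = {
 *		.port = rte_cpu_to_be_16(1024),
 *	};
 *	struct rte_flow_action tp_src_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC,
 *		.conf = &tp_src,
 *	};
 */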
2377
2378 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2379 int32_t
2380 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2381                                struct ulp_rte_parser_params *params)
2382 {
2383         const struct rte_flow_action_set_tp *set_tp;
2384         struct ulp_rte_act_prop *act = &params->act_prop;
2385
2386         set_tp = action_item->conf;
2387         if (set_tp) {
2388                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2389                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2390                 /* Update the act_bitmap with set tp dst */
2391                 ULP_BITMAP_SET(params->act_bitmap.bits,
2392                                BNXT_ULP_ACT_BIT_SET_TP_DST);
2393                 return BNXT_TF_RC_SUCCESS;
2394         }
2395
2396         BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
2397         return BNXT_TF_RC_ERROR;
2398 }
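/*
 * Illustrative sketch (assumption): destination-port rewrite, symmetric with
 * the source-port case above.
 *
 *	struct rte_flow_action_set_tp tp_dst = {
 *		.port = rte_cpu_to_be_16(4789),
 *	};
 *	struct rte_flow_action tp_dst_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_SET_TP_DST,
 *		.conf = &tp_dst,
 *	};
 */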
2399
2400 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2401 int32_t
2402 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2403                             struct ulp_rte_parser_params *params)
2404 {
2405         /* Update the act_bitmap with dec ttl */
2406         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2407         return BNXT_TF_RC_SUCCESS;
2408 }
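/*
 * Illustrative sketch (assumption): DEC_TTL takes no configuration, so the
 * application only lists the action type.
 *
 *	struct rte_flow_action ttl_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_DEC_TTL,
 *		.conf = NULL,
 *	};
 */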
2409
2410 /* Function to handle the parsing of RTE Flow action JUMP */
2411 int32_t
2412 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2413                          struct ulp_rte_parser_params *params)
2414 {
2415         /* Update the act_bitmap with jump */
2416         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2417         return BNXT_TF_RC_SUCCESS;
2418 }
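/*
 * Illustrative sketch (assumption): JUMP carries a target group id in its
 * conf; only the JUMP action bit is recorded here.
 *
 *	struct rte_flow_action_jump jump = {
 *		.group = 1,
 *	};
 *	struct rte_flow_action jump_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_JUMP,
 *		.conf = &jump,
 *	};
 */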
2419
2420 int32_t
2421 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2422                            struct ulp_rte_parser_params *params)
2423 {
2424         const struct rte_flow_action_sample *sample;
2425         int ret;
2426
2427         sample = action_item->conf;
2428
2429         /* if SAMPLE bit is set it means this sample action is nested within the
2430          * actions of another sample action; this is not allowed
2431          */
2432         if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2433                              BNXT_ULP_ACT_BIT_SAMPLE))
2434                 return BNXT_TF_RC_ERROR;
2435
2436         /* a sample action is only allowed as a shared action */
2437         if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2438                               BNXT_ULP_ACT_BIT_SHARED))
2439                 return BNXT_TF_RC_ERROR;
2440
2441         /* only a ratio of 1 i.e. 100% is supported */
2442         if (sample->ratio != 1)
2443                 return BNXT_TF_RC_ERROR;
2444
2445         if (!sample->actions)
2446                 return BNXT_TF_RC_ERROR;
2447
2448         /* parse the nested actions for a sample action */
2449         ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2450         if (ret == BNXT_TF_RC_SUCCESS)
2451                 /* Update the act_bitmap with sample */
2452                 ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_SAMPLE);
2453
2454         return ret;
2455 }
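/*
 * Illustrative sketch (assumption): a sample configuration that satisfies the
 * checks above, namely a ratio of exactly 1 (100%), a non-NULL nested action
 * list that does not itself contain another sample action, and parsing in the
 * shared/indirect action context. Here port_conf stands for an
 * application-defined struct rte_flow_action_port_id.
 *
 *	struct rte_flow_action nested[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &port_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_sample sample_conf = {
 *		.ratio = 1,
 *		.actions = nested,
 *	};
 */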