net/bnxt: identify duplicate flows
[dpdk.git] / drivers / net / bnxt / tf_ulp / ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
14 #include "tfp.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
18 #include "ulp_tun.h"
19
20 /* Local defines for the parsing functions */
21 #define ULP_VLAN_PRIORITY_SHIFT         13 /* First 3 bits */
22 #define ULP_VLAN_PRIORITY_MASK          0x700
23 #define ULP_VLAN_TAG_MASK               0xFFF /* Last 12 bits */
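/* IANA-assigned UDP destination port for VXLAN */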
24 #define ULP_UDP_PORT_VXLAN              4789
25
26 /* Utility function to skip the void items. */
27 static inline int32_t
28 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 {
30         if (!*item)
31                 return 0;
32         if (increment)
33                 (*item)++;
34         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
35                 (*item)++;
36         if (*item)
37                 return 1;
38         return 0;
39 }
40
41 /* Utility function to update the field_bitmap */
42 static void
43 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
44                                    uint32_t idx)
45 {
46         struct ulp_rte_hdr_field *field;
47
48         field = &params->hdr_field[idx];
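        /*
         * A non-zero mask means this field participates in the match; a
         * partial (not all-ones) mask additionally marks the flow as a
         * wildcard match via BNXT_ULP_MATCH_TYPE_BITMASK_WM.
         */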
49         if (ulp_bitmap_notzero(field->mask, field->size)) {
50                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
51                 /* Not exact match */
52                 if (!ulp_bitmap_is_ones(field->mask, field->size))
53                         ULP_BITMAP_SET(params->fld_bitmap.bits,
54                                        BNXT_ULP_MATCH_TYPE_BITMASK_WM);
55         } else {
56                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
57         }
58 }
59
60 /* Utility function to copy field spec items */
61 static struct ulp_rte_hdr_field *
62 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
63                         const void *buffer,
64                         uint32_t size)
65 {
66         field->size = size;
67         memcpy(field->spec, buffer, field->size);
68         field++;
69         return field;
70 }
71
72 /* Utility function to copy field mask items */
73 static void
74 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
75                        uint32_t *idx,
76                        const void *buffer,
77                        uint32_t size)
78 {
79         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
80
81         memcpy(field->mask, buffer, size);
82         ulp_rte_parser_field_bitmap_update(params, *idx);
83         *idx = *idx + 1;
84 }
85
86 /* Utility function to ignore field mask items */
87 static void
88 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
89                          uint32_t *idx,
90                          const void *buffer __rte_unused,
91                          uint32_t size __rte_unused)
92 {
93         *idx = *idx + 1;
94 }
95
96 /*
97  * Function to handle the parsing of RTE Flows and placing
98  * the RTE flow items into the ulp structures.
99  */
100 int32_t
101 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
102                               struct ulp_rte_parser_params *params)
103 {
104         const struct rte_flow_item *item = pattern;
105         struct bnxt_ulp_rte_hdr_info *hdr_info;
106
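        /* Reserve the leading hdr_field entries for the SVIF fields */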
107         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
108
109         /* Set the computed flags for no vlan tags before parsing */
110         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
111         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
112
113         /* Parse all the items in the pattern */
114         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
115                 /* get the header information from the ulp_hdr_info table */
116                 hdr_info = &ulp_hdr_info[item->type];
117                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
118                         BNXT_TF_DBG(ERR,
119                                     "Truflow parser does not support type %d\n",
120                                     item->type);
121                         return BNXT_TF_RC_PARSE_ERR;
122                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
123                         /* call the registered callback handler */
124                         if (hdr_info->proto_hdr_func) {
125                                 if (hdr_info->proto_hdr_func(item, params) !=
126                                     BNXT_TF_RC_SUCCESS) {
127                                         return BNXT_TF_RC_ERROR;
128                                 }
129                         }
130                 }
131                 item++;
132         }
133         /* update the implied SVIF */
134         return ulp_rte_parser_implicit_match_port_process(params);
135 }
136
137 /*
138  * Function to handle the parsing of RTE Flows and placing
139  * the RTE flow actions into the ulp structures.
140  */
141 int32_t
142 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
143                               struct ulp_rte_parser_params *params)
144 {
145         const struct rte_flow_action *action_item = actions;
146         struct bnxt_ulp_rte_act_info *hdr_info;
147
148         /* Parse all the actions in the action list */
149         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
150                 /* get the action information from the ulp_act_info table */
151                 hdr_info = &ulp_act_info[action_item->type];
152                 if (hdr_info->act_type ==
153                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
154                         BNXT_TF_DBG(ERR,
155                                     "Truflow parser does not support act %u\n",
156                                     action_item->type);
157                         return BNXT_TF_RC_ERROR;
158                 } else if (hdr_info->act_type ==
159                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
160                         /* call the registered callback handler */
161                         if (hdr_info->proto_act_func) {
162                                 if (hdr_info->proto_act_func(action_item,
163                                                              params) !=
164                                     BNXT_TF_RC_SUCCESS) {
165                                         return BNXT_TF_RC_ERROR;
166                                 }
167                         }
168                 }
169                 action_item++;
170         }
171         /* update the implied port details */
172         ulp_rte_parser_implicit_act_port_process(params);
173         return BNXT_TF_RC_SUCCESS;
174 }
175
176 /*
177  * Function to handle the post processing of the computed
178  * fields for the interface.
179  */
180 static void
181 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
182 {
183         uint32_t ifindex;
184         uint16_t port_id, parif;
185         uint32_t mtype;
186         enum bnxt_ulp_direction_type dir;
187
188         /* get the direction details */
189         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
190
191         /* read the port id details */
192         port_id = ULP_COMP_FLD_IDX_RD(params,
193                                       BNXT_ULP_CF_IDX_INCOMING_IF);
194         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
195                                               port_id,
196                                               &ifindex)) {
197                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
198                 return;
199         }
200
201         if (dir == BNXT_ULP_DIR_INGRESS) {
202                 /* Set port PARIF */
203                 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
204                                           BNXT_ULP_PHY_PORT_PARIF, &parif)) {
205                         BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
206                         return;
207                 }
208                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
209                                     parif);
210         } else {
211                 /* Get the match port type */
212                 mtype = ULP_COMP_FLD_IDX_RD(params,
213                                             BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
214                 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
215                         ULP_COMP_FLD_IDX_WR(params,
216                                             BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
217                                             1);
218                         /* Set VF func PARIF */
219                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
220                                                   BNXT_ULP_VF_FUNC_PARIF,
221                                                   &parif)) {
222                                 BNXT_TF_DBG(ERR,
223                                             "ParseErr:ifindex is not valid\n");
224                                 return;
225                         }
226                         ULP_COMP_FLD_IDX_WR(params,
227                                             BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
228                                             parif);
229
230                 } else {
231                         /* Set DRV func PARIF */
232                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
233                                                   BNXT_ULP_DRV_FUNC_PARIF,
234                                                   &parif)) {
235                                 BNXT_TF_DBG(ERR,
236                                             "ParseErr:ifindex is not valid\n");
237                                 return;
238                         }
239                         ULP_COMP_FLD_IDX_WR(params,
240                                             BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
241                                             parif);
242                 }
243         }
244 }
245
246 static int32_t
247 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
248 {
249         enum bnxt_ulp_intf_type match_port_type, act_port_type;
250         enum bnxt_ulp_direction_type dir;
251         uint32_t act_port_set;
252
253         /* Get the computed details */
254         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
255         match_port_type = ULP_COMP_FLD_IDX_RD(params,
256                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
257         act_port_type = ULP_COMP_FLD_IDX_RD(params,
258                                             BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
259         act_port_set = ULP_COMP_FLD_IDX_RD(params,
260                                            BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
261
262         /* set the flow direction in the proto and action header */
263         if (dir == BNXT_ULP_DIR_EGRESS) {
264                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
265                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
266                 ULP_BITMAP_SET(params->act_bitmap.bits,
267                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
268         }
269
270         /* calculate the VF to VF flag */
271         if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
272             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
273                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
274
275         /* Update the decrement ttl computational fields */
276         if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
277                              BNXT_ULP_ACTION_BIT_DEC_TTL)) {
278                 /*
279                  * If the VXLAN header is present and the VXLAN decap
280                  * action is not set, decrement the tunnel TTL.
281                  * Similarly handle GRE and NVGRE in the future.
282                  */
283                 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
284                                       BNXT_ULP_HDR_BIT_T_VXLAN) &&
285                     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
286                                       BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
287                         ULP_COMP_FLD_IDX_WR(params,
288                                             BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
289                 } else {
290                         ULP_COMP_FLD_IDX_WR(params,
291                                             BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
292                 }
293         }
294
295         /* Merge the hdr_fp_bit into the proto header bit */
296         params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
297
298         /* Update the comp fld fid */
299         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
300
301         /* Update the computed interface parameters */
302         bnxt_ulp_comp_fld_intf_update(params);
303
304         /* TBD: Handle the flow rejection scenarios */
305         return 0;
306 }
307
308 /*
309  * Function to handle the post processing of the parsing details
310  */
311 int32_t
312 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
313 {
314         ulp_post_process_normal_flow(params);
315         return ulp_post_process_tun_flow(params);
316 }
317
318 /*
319  * Function to compute the flow direction based on the match port details
320  */
321 static void
322 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
323 {
324         enum bnxt_ulp_intf_type match_port_type;
325
326         /* Get the match port type */
327         match_port_type = ULP_COMP_FLD_IDX_RD(params,
328                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
329
330         /* If the flow is ingress and the match port is a VF rep, the dir is egress */
331         if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
332             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
333                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
334                                     BNXT_ULP_DIR_EGRESS);
335         } else {
336                 /* Assign the input direction */
337                 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
338                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
339                                             BNXT_ULP_DIR_INGRESS);
340                 else
341                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
342                                             BNXT_ULP_DIR_EGRESS);
343         }
344 }
345
346 /* Function to handle the parsing of RTE Flow item PF Header. */
347 static int32_t
348 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
349                         uint32_t ifindex,
350                         uint16_t mask)
351 {
352         uint16_t svif;
353         enum bnxt_ulp_direction_type dir;
354         struct ulp_rte_hdr_field *hdr_field;
355         enum bnxt_ulp_svif_type svif_type;
356         enum bnxt_ulp_intf_type port_type;
357
358         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
359             BNXT_ULP_INVALID_SVIF_VAL) {
360                 BNXT_TF_DBG(ERR,
361                             "SVIF already set, multiple sources not supported\n");
362                 return BNXT_TF_RC_ERROR;
363         }
364
365         /* Get port type details */
366         port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
367         if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
368                 BNXT_TF_DBG(ERR, "Invalid port type\n");
369                 return BNXT_TF_RC_ERROR;
370         }
371
372         /* Update the match port type */
373         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
374
375         /* compute the direction */
376         bnxt_ulp_rte_parser_direction_compute(params);
377
378         /* Get the computed direction */
379         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
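        /*
         * Ingress flows match on the physical port SVIF. Egress flows match
         * on the VF function SVIF for VF representors and on the driver
         * function SVIF otherwise.
         */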
380         if (dir == BNXT_ULP_DIR_INGRESS) {
381                 svif_type = BNXT_ULP_PHY_PORT_SVIF;
382         } else {
383                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
384                         svif_type = BNXT_ULP_VF_FUNC_SVIF;
385                 else
386                         svif_type = BNXT_ULP_DRV_FUNC_SVIF;
387         }
388         ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
389                              &svif);
390         svif = rte_cpu_to_be_16(svif);
391         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
392         memcpy(hdr_field->spec, &svif, sizeof(svif));
393         memcpy(hdr_field->mask, &mask, sizeof(mask));
394         hdr_field->size = sizeof(svif);
395         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
396                             rte_be_to_cpu_16(svif));
397         return BNXT_TF_RC_SUCCESS;
398 }
399
400 /* Function to handle the parsing of the RTE port id */
401 int32_t
402 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
403 {
404         uint16_t port_id = 0;
405         uint16_t svif_mask = 0xFFFF;
406         uint32_t ifindex;
407         int32_t rc = BNXT_TF_RC_ERROR;
408
409         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
410             BNXT_ULP_INVALID_SVIF_VAL)
411                 return BNXT_TF_RC_SUCCESS;
412
413         /* SVIF not set. So get the port id */
414         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
415
416         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
417                                               port_id,
418                                               &ifindex)) {
419                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
420                 return rc;
421         }
422
423         /* Update the SVIF details */
424         rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
425         return rc;
426 }
427
428 /* Function to handle the implicit action port id */
429 int32_t
430 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
431 {
432         struct rte_flow_action action_item = {0};
433         struct rte_flow_action_port_id port_id = {0};
434
435         /* Read the action port set bit */
436         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
437                 /* Already set, so just exit */
438                 return BNXT_TF_RC_SUCCESS;
439         }
440         port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
441         action_item.conf = &port_id;
442
443         /* Update the action port based on incoming port */
444         ulp_rte_port_id_act_handler(&action_item, params);
445
446         /* Reset the action port set bit */
447         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
448         return BNXT_TF_RC_SUCCESS;
449 }
450
451 /* Function to handle the parsing of RTE Flow item PF Header. */
452 int32_t
453 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
454                        struct ulp_rte_parser_params *params)
455 {
456         uint16_t port_id = 0;
457         uint16_t svif_mask = 0xFFFF;
458         uint32_t ifindex;
459
460         /* Get the implicit port id */
461         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
462
463         /* perform the conversion from dpdk port to bnxt ifindex */
464         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
465                                               port_id,
466                                               &ifindex)) {
467                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
468                 return BNXT_TF_RC_ERROR;
469         }
470
471         /* Update the SVIF details */
472         return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
473 }
474
475 /* Function to handle the parsing of RTE Flow item VF Header. */
476 int32_t
477 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
478                        struct ulp_rte_parser_params *params)
479 {
480         const struct rte_flow_item_vf *vf_spec = item->spec;
481         const struct rte_flow_item_vf *vf_mask = item->mask;
482         uint16_t mask = 0;
483         uint32_t ifindex;
484         int32_t rc = BNXT_TF_RC_PARSE_ERR;
485
486         /* Get VF rte_flow_item for Port details */
487         if (!vf_spec) {
488                 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
489                 return rc;
490         }
491         if (!vf_mask) {
492                 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
493                 return rc;
494         }
495         mask = vf_mask->id;
496
497         /* perform the conversion from VF Func id to bnxt ifindex */
498         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
499                                                  vf_spec->id,
500                                                  &ifindex)) {
501                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
502                 return rc;
503         }
504         /* Update the SVIF details */
505         return ulp_rte_parser_svif_set(params, ifindex, mask);
506 }
507
508 /* Function to handle the parsing of RTE Flow item port id Header. */
509 int32_t
510 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
511                             struct ulp_rte_parser_params *params)
512 {
513         const struct rte_flow_item_port_id *port_spec = item->spec;
514         const struct rte_flow_item_port_id *port_mask = item->mask;
515         uint16_t mask = 0;
516         int32_t rc = BNXT_TF_RC_PARSE_ERR;
517         uint32_t ifindex;
518
519         if (!port_spec) {
520                 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
521                 return rc;
522         }
523         if (!port_mask) {
524                 BNXT_TF_DBG(ERR, "ParseErr:Port id mask is not valid\n");
525                 return rc;
526         }
527         mask = port_mask->id;
528
529         /* perform the conversion from dpdk port to bnxt ifindex */
530         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
531                                               port_spec->id,
532                                               &ifindex)) {
533                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
534                 return rc;
535         }
536         /* Update the SVIF details */
537         return ulp_rte_parser_svif_set(params, ifindex, mask);
538 }
539
540 /* Function to handle the parsing of RTE Flow item phy port Header. */
541 int32_t
542 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
543                              struct ulp_rte_parser_params *params)
544 {
545         const struct rte_flow_item_phy_port *port_spec = item->spec;
546         const struct rte_flow_item_phy_port *port_mask = item->mask;
547         uint16_t mask = 0;
548         int32_t rc = BNXT_TF_RC_ERROR;
549         uint16_t svif;
550         enum bnxt_ulp_direction_type dir;
551         struct ulp_rte_hdr_field *hdr_field;
552
553         /* Copy the rte_flow_item for phy port into hdr_field */
554         if (!port_spec) {
555                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
556                 return rc;
557         }
558         if (!port_mask) {
559                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
560                 return rc;
561         }
562         mask = port_mask->index;
563
564         /* Update the match port type */
565         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
566                             BNXT_ULP_INTF_TYPE_PHY_PORT);
567
568         /* Compute the Hw direction */
569         bnxt_ulp_rte_parser_direction_compute(params);
570
571         /* Direction validation */
572         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
573         if (dir == BNXT_ULP_DIR_EGRESS) {
574                 BNXT_TF_DBG(ERR,
575                             "Parse Err:Phy ports are valid only for ingress\n");
576                 return BNXT_TF_RC_PARSE_ERR;
577         }
578
579         /* Get the physical port details from port db */
580         rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
581                                            &svif);
582         if (rc) {
583                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
584                 return BNXT_TF_RC_PARSE_ERR;
585         }
586
587         /* Update the SVIF details */
588         svif = rte_cpu_to_be_16(svif);
589         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
590         memcpy(hdr_field->spec, &svif, sizeof(svif));
591         memcpy(hdr_field->mask, &mask, sizeof(mask));
592         hdr_field->size = sizeof(svif);
593         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
594                             rte_be_to_cpu_16(svif));
595         return BNXT_TF_RC_SUCCESS;
596 }
597
598 /* Function to handle the update of proto header based on field values */
599 static void
600 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
601                              uint16_t type, uint32_t in_flag)
602 {
603         if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
604                 if (in_flag) {
605                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
606                                        BNXT_ULP_HDR_BIT_I_IPV4);
607                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
608                 } else {
609                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
610                                        BNXT_ULP_HDR_BIT_O_IPV4);
611                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
612                 }
613         } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
614                 if (in_flag) {
615                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
616                                        BNXT_ULP_HDR_BIT_I_IPV6);
617                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
618                 } else {
619                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
620                                        BNXT_ULP_HDR_BIT_O_IPV6);
621                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
622                 }
623         }
624 }
625
626 /* Internal Function to identify broadcast or multicast packets */
627 static int32_t
628 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
629 {
630         if (rte_is_multicast_ether_addr(eth_addr) ||
631             rte_is_broadcast_ether_addr(eth_addr)) {
632                 BNXT_TF_DBG(DEBUG,
633                             "No support for bcast or mcast addr offload\n");
634                 return 1;
635         }
636         return 0;
637 }
638
639 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
640 int32_t
641 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
642                         struct ulp_rte_parser_params *params)
643 {
644         const struct rte_flow_item_eth *eth_spec = item->spec;
645         const struct rte_flow_item_eth *eth_mask = item->mask;
646         struct ulp_rte_hdr_field *field;
647         uint32_t idx = params->field_idx;
648         uint32_t size;
649         uint16_t eth_type = 0;
650         uint32_t inner_flag = 0;
651
652         /*
653          * Copy the rte_flow_item for eth into hdr_field using ethernet
654          * header fields
655          */
656         if (eth_spec) {
657                 size = sizeof(eth_spec->dst.addr_bytes);
658                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
659                                                 eth_spec->dst.addr_bytes,
660                                                 size);
661                 /* Todo: workaround to avoid multicast and broadcast addr */
662                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
663                         return BNXT_TF_RC_PARSE_ERR;
664
665                 size = sizeof(eth_spec->src.addr_bytes);
666                 field = ulp_rte_parser_fld_copy(field,
667                                                 eth_spec->src.addr_bytes,
668                                                 size);
669                 /* Todo: workaround to avoid multicast and broadcast addr */
670                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
671                         return BNXT_TF_RC_PARSE_ERR;
672
673                 field = ulp_rte_parser_fld_copy(field,
674                                                 &eth_spec->type,
675                                                 sizeof(eth_spec->type));
676                 eth_type = eth_spec->type;
677         }
678         if (eth_mask) {
679                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
680                                        sizeof(eth_mask->dst.addr_bytes));
681                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
682                                        sizeof(eth_mask->src.addr_bytes));
683                 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
684                                        sizeof(eth_mask->type));
685         }
686         /* Add number of Eth header elements */
687         params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
688
689         /* Update the protocol hdr bitmap */
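        /*
         * If any outer L2/L3/L4 header has already been parsed, this
         * Ethernet header must be the inner one; otherwise treat it as
         * the outer header.
         */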
690         if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
691                              BNXT_ULP_HDR_BIT_O_ETH) ||
692             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
693                              BNXT_ULP_HDR_BIT_O_IPV4) ||
694             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
695                              BNXT_ULP_HDR_BIT_O_IPV6) ||
696             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697                              BNXT_ULP_HDR_BIT_O_UDP) ||
698             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699                              BNXT_ULP_HDR_BIT_O_TCP)) {
700                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
701                 inner_flag = 1;
702         } else {
703                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
704         }
705         /* Update the field protocol hdr bitmap */
706         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
707
708         return BNXT_TF_RC_SUCCESS;
709 }
710
711 /* Function to handle the parsing of RTE Flow item Vlan Header. */
712 int32_t
713 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
714                          struct ulp_rte_parser_params *params)
715 {
716         const struct rte_flow_item_vlan *vlan_spec = item->spec;
717         const struct rte_flow_item_vlan *vlan_mask = item->mask;
718         struct ulp_rte_hdr_field *field;
719         struct ulp_rte_hdr_bitmap       *hdr_bit;
720         uint32_t idx = params->field_idx;
721         uint16_t vlan_tag, priority;
722         uint32_t outer_vtag_num;
723         uint32_t inner_vtag_num;
724         uint16_t eth_type = 0;
725         uint32_t inner_flag = 0;
726
727         /*
728          * Copy the rte_flow_item for vlan into hdr_field using Vlan
729          * header fields
730          */
731         if (vlan_spec) {
732                 vlan_tag = ntohs(vlan_spec->tci);
733                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
734                 vlan_tag &= ULP_VLAN_TAG_MASK;
735                 vlan_tag = htons(vlan_tag);
736
737                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
738                                                 &priority,
739                                                 sizeof(priority));
740                 field = ulp_rte_parser_fld_copy(field,
741                                                 &vlan_tag,
742                                                 sizeof(vlan_tag));
743                 field = ulp_rte_parser_fld_copy(field,
744                                                 &vlan_spec->inner_type,
745                                                 sizeof(vlan_spec->inner_type));
746                 eth_type = vlan_spec->inner_type;
747         }
748
749         if (vlan_mask) {
750                 vlan_tag = ntohs(vlan_mask->tci);
751                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
752                 vlan_tag &= 0xfff;
753
754                 /*
755                  * The storage for the priority and vlan tag is 2 bytes.
756                  * The priority mask is 3 bits; if it is all 1's, set the
757                  * remaining 13 bits to 1's as well so that it is matched
758                  * as an exact match.
759                  */
760                 if (priority == ULP_VLAN_PRIORITY_MASK)
761                         priority |= ~ULP_VLAN_PRIORITY_MASK;
762                 if (vlan_tag == ULP_VLAN_TAG_MASK)
763                         vlan_tag |= ~ULP_VLAN_TAG_MASK;
764                 vlan_tag = htons(vlan_tag);
765
766                 /*
767                  * The priority field is ignored since OVS sets it as a
768                  * wildcard match, which is not supported. This is a
769                  * workaround and shall be addressed in the future.
770                  */
771                 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
772                                          sizeof(priority));
773
774                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
775                                        sizeof(vlan_tag));
776                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
777                                        sizeof(vlan_mask->inner_type));
778         }
779         /* Set the field index to new incremented value */
780         params->field_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
781
782         /* Get the outer tag and inner tag counts */
783         outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
784                                              BNXT_ULP_CF_IDX_O_VTAG_NUM);
785         inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
786                                              BNXT_ULP_CF_IDX_I_VTAG_NUM);
787
788         /* Update the hdr_bitmap of the vlans */
789         hdr_bit = &params->hdr_bitmap;
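        /*
         * Classify the tag as outer-outer, outer-inner, inner-outer or
         * inner-inner VLAN based on which Ethernet headers have been seen
         * and how many tags were already counted.
         */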
790         if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
791             !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
792             !outer_vtag_num) {
793                 /* Update the vlan tag num */
794                 outer_vtag_num++;
795                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
796                                     outer_vtag_num);
797                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
798                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
799                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
800                                BNXT_ULP_HDR_BIT_OO_VLAN);
801         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
802                    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
803                    outer_vtag_num == 1) {
804                 /* update the vlan tag num */
805                 outer_vtag_num++;
806                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
807                                     outer_vtag_num);
808                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
809                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
810                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
811                                BNXT_ULP_HDR_BIT_OI_VLAN);
812         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
813                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
814                    !inner_vtag_num) {
815                 /* update the vlan tag num */
816                 inner_vtag_num++;
817                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
818                                     inner_vtag_num);
819                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
820                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
821                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
822                                BNXT_ULP_HDR_BIT_IO_VLAN);
823                 inner_flag = 1;
824         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
825                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
826                    inner_vtag_num == 1) {
827                 /* update the vlan tag num */
828                 inner_vtag_num++;
829                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
830                                     inner_vtag_num);
831                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
832                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
833                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
834                                BNXT_ULP_HDR_BIT_II_VLAN);
835                 inner_flag = 1;
836         } else {
837                 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
838                 return BNXT_TF_RC_ERROR;
839         }
840         /* Update the field protocol hdr bitmap */
841         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
842         return BNXT_TF_RC_SUCCESS;
843 }
844
845 /* Function to handle the update of proto header based on field values */
846 static void
847 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
848                              uint8_t proto, uint32_t in_flag)
849 {
850         if (proto == IPPROTO_UDP) {
851                 if (in_flag) {
852                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
853                                        BNXT_ULP_HDR_BIT_I_UDP);
854                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
855                 } else {
856                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
857                                        BNXT_ULP_HDR_BIT_O_UDP);
858                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
859                 }
860         } else if (proto == IPPROTO_TCP) {
861                 if (in_flag) {
862                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
863                                        BNXT_ULP_HDR_BIT_I_TCP);
864                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
865                 } else {
866                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
867                                        BNXT_ULP_HDR_BIT_O_TCP);
868                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
869                 }
870         }
871 }
872
873 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
874 int32_t
875 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
876                          struct ulp_rte_parser_params *params)
877 {
878         const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
879         const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
880         struct ulp_rte_hdr_field *field;
881         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
882         uint32_t idx = params->field_idx;
883         uint32_t size;
884         uint8_t proto = 0;
885         uint32_t inner_flag = 0;
886         uint32_t cnt;
887
888         /* validate that there is no 3rd L3 header */
889         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
890         if (cnt == 2) {
891                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
892                 return BNXT_TF_RC_ERROR;
893         }
894
895         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
896                               BNXT_ULP_HDR_BIT_O_ETH) &&
897             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
898                               BNXT_ULP_HDR_BIT_I_ETH)) {
899                 /* The F2 flow does not include an eth item, so when the
900                  * parser detects an IPv4/IPv6 item that belongs to the
901                  * outer header (o_ipv4/o_ipv6) and neither O_ETH nor I_ETH
902                  * is set, add the offset sizeof(o_eth/oo_vlan/oi_vlan) to
903                  * the index. This allows the parser post processor to
904                  * update the t_dmac in hdr_field[o_eth.dmac].
905                  */
906                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
907                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
908                 params->field_idx = idx;
909         }
910
911         /*
912          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
913          * header fields
914          */
915         if (ipv4_spec) {
916                 size = sizeof(ipv4_spec->hdr.version_ihl);
917                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
918                                                 &ipv4_spec->hdr.version_ihl,
919                                                 size);
920                 size = sizeof(ipv4_spec->hdr.type_of_service);
921                 field = ulp_rte_parser_fld_copy(field,
922                                                 &ipv4_spec->hdr.type_of_service,
923                                                 size);
924                 size = sizeof(ipv4_spec->hdr.total_length);
925                 field = ulp_rte_parser_fld_copy(field,
926                                                 &ipv4_spec->hdr.total_length,
927                                                 size);
928                 size = sizeof(ipv4_spec->hdr.packet_id);
929                 field = ulp_rte_parser_fld_copy(field,
930                                                 &ipv4_spec->hdr.packet_id,
931                                                 size);
932                 size = sizeof(ipv4_spec->hdr.fragment_offset);
933                 field = ulp_rte_parser_fld_copy(field,
934                                                 &ipv4_spec->hdr.fragment_offset,
935                                                 size);
936                 size = sizeof(ipv4_spec->hdr.time_to_live);
937                 field = ulp_rte_parser_fld_copy(field,
938                                                 &ipv4_spec->hdr.time_to_live,
939                                                 size);
940                 size = sizeof(ipv4_spec->hdr.next_proto_id);
941                 field = ulp_rte_parser_fld_copy(field,
942                                                 &ipv4_spec->hdr.next_proto_id,
943                                                 size);
944                 proto = ipv4_spec->hdr.next_proto_id;
945                 size = sizeof(ipv4_spec->hdr.hdr_checksum);
946                 field = ulp_rte_parser_fld_copy(field,
947                                                 &ipv4_spec->hdr.hdr_checksum,
948                                                 size);
949                 size = sizeof(ipv4_spec->hdr.src_addr);
950                 field = ulp_rte_parser_fld_copy(field,
951                                                 &ipv4_spec->hdr.src_addr,
952                                                 size);
953                 size = sizeof(ipv4_spec->hdr.dst_addr);
954                 field = ulp_rte_parser_fld_copy(field,
955                                                 &ipv4_spec->hdr.dst_addr,
956                                                 size);
957         }
958         if (ipv4_mask) {
959                 ulp_rte_prsr_mask_copy(params, &idx,
960                                        &ipv4_mask->hdr.version_ihl,
961                                        sizeof(ipv4_mask->hdr.version_ihl));
962                 /*
963                  * The tos field is ignored since OVS sets it as a wildcard
964                  * match, which is not supported. This is a workaround and
965                  * shall be addressed in the future.
966                  */
967                 ulp_rte_prsr_mask_ignore(params, &idx,
968                                          &ipv4_mask->hdr.type_of_service,
969                                          sizeof(ipv4_mask->hdr.type_of_service)
970                                          );
971
972                 ulp_rte_prsr_mask_copy(params, &idx,
973                                        &ipv4_mask->hdr.total_length,
974                                        sizeof(ipv4_mask->hdr.total_length));
975                 ulp_rte_prsr_mask_copy(params, &idx,
976                                        &ipv4_mask->hdr.packet_id,
977                                        sizeof(ipv4_mask->hdr.packet_id));
978                 ulp_rte_prsr_mask_copy(params, &idx,
979                                        &ipv4_mask->hdr.fragment_offset,
980                                        sizeof(ipv4_mask->hdr.fragment_offset));
981                 ulp_rte_prsr_mask_copy(params, &idx,
982                                        &ipv4_mask->hdr.time_to_live,
983                                        sizeof(ipv4_mask->hdr.time_to_live));
984                 ulp_rte_prsr_mask_copy(params, &idx,
985                                        &ipv4_mask->hdr.next_proto_id,
986                                        sizeof(ipv4_mask->hdr.next_proto_id));
987                 ulp_rte_prsr_mask_copy(params, &idx,
988                                        &ipv4_mask->hdr.hdr_checksum,
989                                        sizeof(ipv4_mask->hdr.hdr_checksum));
990                 ulp_rte_prsr_mask_copy(params, &idx,
991                                        &ipv4_mask->hdr.src_addr,
992                                        sizeof(ipv4_mask->hdr.src_addr));
993                 ulp_rte_prsr_mask_copy(params, &idx,
994                                        &ipv4_mask->hdr.dst_addr,
995                                        sizeof(ipv4_mask->hdr.dst_addr));
996         }
997         /* Add the number of ipv4 header elements */
998         params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
999
1000         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1001         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1002             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1003                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1004                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1005                 inner_flag = 1;
1006         } else {
1007                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1008                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1009         }
1010
1011         /* Update the field protocol hdr bitmap */
1012         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1013         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1014         return BNXT_TF_RC_SUCCESS;
1015 }
1016
1017 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1018 int32_t
1019 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1020                          struct ulp_rte_parser_params *params)
1021 {
1022         const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1023         const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1024         struct ulp_rte_hdr_field *field;
1025         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1026         uint32_t idx = params->field_idx;
1027         uint32_t size;
1028         uint32_t vtcf, vtcf_mask;
1029         uint8_t proto = 0;
1030         uint32_t inner_flag = 0;
1031         uint32_t cnt;
1032
1033         /* validate that there is no 3rd L3 header */
1034         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1035         if (cnt == 2) {
1036                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1037                 return BNXT_TF_RC_ERROR;
1038         }
1039
1040         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1041                               BNXT_ULP_HDR_BIT_O_ETH) &&
1042             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1043                               BNXT_ULP_HDR_BIT_I_ETH)) {
1044                 /* The F2 flow does not include an eth item, so when the
1045                  * parser detects an IPv4/IPv6 item that belongs to the
1046                  * outer header (o_ipv4/o_ipv6) and neither O_ETH nor I_ETH
1047                  * is set, add the offset sizeof(o_eth/oo_vlan/oi_vlan) to
1048                  * the index. This allows the parser post processor to
1049                  * update the t_dmac in hdr_field[o_eth.dmac].
1050                  */
1051                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1052                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
1053                 params->field_idx = idx;
1054         }
1055
1056         /*
1057          * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1058          * header fields
1059          */
1060         if (ipv6_spec) {
1061                 size = sizeof(ipv6_spec->hdr.vtc_flow);
1062
1063                 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1064                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1065                                                 &vtcf,
1066                                                 size);
1067
1068                 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1069                 field = ulp_rte_parser_fld_copy(field,
1070                                                 &vtcf,
1071                                                 size);
1072
1073                 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1074                 field = ulp_rte_parser_fld_copy(field,
1075                                                 &vtcf,
1076                                                 size);
1077
1078                 size = sizeof(ipv6_spec->hdr.payload_len);
1079                 field = ulp_rte_parser_fld_copy(field,
1080                                                 &ipv6_spec->hdr.payload_len,
1081                                                 size);
1082                 size = sizeof(ipv6_spec->hdr.proto);
1083                 field = ulp_rte_parser_fld_copy(field,
1084                                                 &ipv6_spec->hdr.proto,
1085                                                 size);
1086                 proto = ipv6_spec->hdr.proto;
1087                 size = sizeof(ipv6_spec->hdr.hop_limits);
1088                 field = ulp_rte_parser_fld_copy(field,
1089                                                 &ipv6_spec->hdr.hop_limits,
1090                                                 size);
1091                 size = sizeof(ipv6_spec->hdr.src_addr);
1092                 field = ulp_rte_parser_fld_copy(field,
1093                                                 &ipv6_spec->hdr.src_addr,
1094                                                 size);
1095                 size = sizeof(ipv6_spec->hdr.dst_addr);
1096                 field = ulp_rte_parser_fld_copy(field,
1097                                                 &ipv6_spec->hdr.dst_addr,
1098                                                 size);
1099         }
1100         if (ipv6_mask) {
1101                 size = sizeof(ipv6_mask->hdr.vtc_flow);
1102
1103                 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1104                 ulp_rte_prsr_mask_copy(params, &idx,
1105                                        &vtcf_mask,
1106                                        size);
1107                 /*
1108                  * The TC and flow label fields are ignored since OVS sets
1109                  * them for matching and that is not supported.
1110                  * This is a workaround and
1111                  * shall be addressed in the future.
1112                  */
1113                 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1114                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1115                 vtcf_mask =
1116                         BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1117                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1118
1119                 ulp_rte_prsr_mask_copy(params, &idx,
1120                                        &ipv6_mask->hdr.payload_len,
1121                                        sizeof(ipv6_mask->hdr.payload_len));
1122                 ulp_rte_prsr_mask_copy(params, &idx,
1123                                        &ipv6_mask->hdr.proto,
1124                                        sizeof(ipv6_mask->hdr.proto));
1125                 ulp_rte_prsr_mask_copy(params, &idx,
1126                                        &ipv6_mask->hdr.hop_limits,
1127                                        sizeof(ipv6_mask->hdr.hop_limits));
1128                 ulp_rte_prsr_mask_copy(params, &idx,
1129                                        &ipv6_mask->hdr.src_addr,
1130                                        sizeof(ipv6_mask->hdr.src_addr));
1131                 ulp_rte_prsr_mask_copy(params, &idx,
1132                                        &ipv6_mask->hdr.dst_addr,
1133                                        sizeof(ipv6_mask->hdr.dst_addr));
1134         }
1135         /* add number of ipv6 header elements */
1136         params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1137
1138         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1139         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1140             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1141                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1142                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1143                 inner_flag = 1;
1144         } else {
1145                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1146                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1147         }
1148
1149         /* Update the field protocol hdr bitmap */
1150         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1151         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1152
1153         return BNXT_TF_RC_SUCCESS;
1154 }
1155
1156 /* Function to handle the update of proto header based on field values */
1157 static void
1158 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1159                              uint16_t dst_port)
1160 {
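        /*
         * A UDP destination port of 4789 (VXLAN) marks the flow as a tunnel
         * flow: set the VXLAN tunnel header bit and the L3 tunnel computed
         * field.
         */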
1161         if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1162                 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1163                                BNXT_ULP_HDR_BIT_T_VXLAN);
1164                 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1165         }
1166 }
1167
1168 /* Function to handle the parsing of RTE Flow item UDP Header. */
1169 int32_t
1170 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1171                         struct ulp_rte_parser_params *params)
1172 {
1173         const struct rte_flow_item_udp *udp_spec = item->spec;
1174         const struct rte_flow_item_udp *udp_mask = item->mask;
1175         struct ulp_rte_hdr_field *field;
1176         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1177         uint32_t idx = params->field_idx;
1178         uint32_t size;
1179         uint16_t dst_port = 0;
1180         uint32_t cnt;
1181
1182         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1183         if (cnt == 2) {
1184                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1185                 return BNXT_TF_RC_ERROR;
1186         }
1187
1188         /*
1189          * Copy the rte_flow_item for udp into hdr_field using udp
1190          * header fields
1191          */
1192         if (udp_spec) {
1193                 size = sizeof(udp_spec->hdr.src_port);
1194                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1195                                                 &udp_spec->hdr.src_port,
1196                                                 size);
1197
1198                 size = sizeof(udp_spec->hdr.dst_port);
1199                 field = ulp_rte_parser_fld_copy(field,
1200                                                 &udp_spec->hdr.dst_port,
1201                                                 size);
1202                 dst_port = udp_spec->hdr.dst_port;
1203                 size = sizeof(udp_spec->hdr.dgram_len);
1204                 field = ulp_rte_parser_fld_copy(field,
1205                                                 &udp_spec->hdr.dgram_len,
1206                                                 size);
1207                 size = sizeof(udp_spec->hdr.dgram_cksum);
1208                 field = ulp_rte_parser_fld_copy(field,
1209                                                 &udp_spec->hdr.dgram_cksum,
1210                                                 size);
1211         }
1212         if (udp_mask) {
1213                 ulp_rte_prsr_mask_copy(params, &idx,
1214                                        &udp_mask->hdr.src_port,
1215                                        sizeof(udp_mask->hdr.src_port));
1216                 ulp_rte_prsr_mask_copy(params, &idx,
1217                                        &udp_mask->hdr.dst_port,
1218                                        sizeof(udp_mask->hdr.dst_port));
1219                 ulp_rte_prsr_mask_copy(params, &idx,
1220                                        &udp_mask->hdr.dgram_len,
1221                                        sizeof(udp_mask->hdr.dgram_len));
1222                 ulp_rte_prsr_mask_copy(params, &idx,
1223                                        &udp_mask->hdr.dgram_cksum,
1224                                        sizeof(udp_mask->hdr.dgram_cksum));
1225         }
1226
1227         /* Add number of UDP header elements */
1228         params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1229
1230         /* Set the udp header bitmap and computed l4 header bitmaps */
1231         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1232             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1233                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1234                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1235         } else {
1236                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1237                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1238                 /* Update the field protocol hdr bitmap */
1239                 ulp_rte_l4_proto_type_update(params, dst_port);
1240         }
1241         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1242         return BNXT_TF_RC_SUCCESS;
1243 }
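
/*
 * Illustrative sketch (not compiled into the driver): a VXLAN-encapsulated
 * pattern as an application might express it. The first UDP item lands in
 * the "else" branch above and sets O_UDP; the UDP item after the VXLAN
 * header takes the inner branch and sets I_UDP. The ULP_PARSER_EXAMPLES
 * guard and the example_* name are hypothetical.
 */
#ifdef ULP_PARSER_EXAMPLES
static const struct rte_flow_item example_vxlan_udp_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },     /* outer L4 -> O_UDP */
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },     /* inner L4 -> I_UDP */
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif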
1244
1245 /* Function to handle the parsing of RTE Flow item TCP Header. */
1246 int32_t
1247 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1248                         struct ulp_rte_parser_params *params)
1249 {
1250         const struct rte_flow_item_tcp *tcp_spec = item->spec;
1251         const struct rte_flow_item_tcp *tcp_mask = item->mask;
1252         struct ulp_rte_hdr_field *field;
1253         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1254         uint32_t idx = params->field_idx;
1255         uint32_t size;
1256         uint32_t cnt;
1257
1258         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1259         if (cnt == 2) {
1260                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1261                 return BNXT_TF_RC_ERROR;
1262         }
1263
1264         /*
1265          * Copy the rte_flow_item for tcp into hdr_field using tcp
1266          * header fields
1267          */
1268         if (tcp_spec) {
1269                 size = sizeof(tcp_spec->hdr.src_port);
1270                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1271                                                 &tcp_spec->hdr.src_port,
1272                                                 size);
1273                 size = sizeof(tcp_spec->hdr.dst_port);
1274                 field = ulp_rte_parser_fld_copy(field,
1275                                                 &tcp_spec->hdr.dst_port,
1276                                                 size);
1277                 size = sizeof(tcp_spec->hdr.sent_seq);
1278                 field = ulp_rte_parser_fld_copy(field,
1279                                                 &tcp_spec->hdr.sent_seq,
1280                                                 size);
1281                 size = sizeof(tcp_spec->hdr.recv_ack);
1282                 field = ulp_rte_parser_fld_copy(field,
1283                                                 &tcp_spec->hdr.recv_ack,
1284                                                 size);
1285                 size = sizeof(tcp_spec->hdr.data_off);
1286                 field = ulp_rte_parser_fld_copy(field,
1287                                                 &tcp_spec->hdr.data_off,
1288                                                 size);
1289                 size = sizeof(tcp_spec->hdr.tcp_flags);
1290                 field = ulp_rte_parser_fld_copy(field,
1291                                                 &tcp_spec->hdr.tcp_flags,
1292                                                 size);
1293                 size = sizeof(tcp_spec->hdr.rx_win);
1294                 field = ulp_rte_parser_fld_copy(field,
1295                                                 &tcp_spec->hdr.rx_win,
1296                                                 size);
1297                 size = sizeof(tcp_spec->hdr.cksum);
1298                 field = ulp_rte_parser_fld_copy(field,
1299                                                 &tcp_spec->hdr.cksum,
1300                                                 size);
1301                 size = sizeof(tcp_spec->hdr.tcp_urp);
1302                 field = ulp_rte_parser_fld_copy(field,
1303                                                 &tcp_spec->hdr.tcp_urp,
1304                                                 size);
1305         } else {
1306                 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1307         }
1308
1309         if (tcp_mask) {
1310                 ulp_rte_prsr_mask_copy(params, &idx,
1311                                        &tcp_mask->hdr.src_port,
1312                                        sizeof(tcp_mask->hdr.src_port));
1313                 ulp_rte_prsr_mask_copy(params, &idx,
1314                                        &tcp_mask->hdr.dst_port,
1315                                        sizeof(tcp_mask->hdr.dst_port));
1316                 ulp_rte_prsr_mask_copy(params, &idx,
1317                                        &tcp_mask->hdr.sent_seq,
1318                                        sizeof(tcp_mask->hdr.sent_seq));
1319                 ulp_rte_prsr_mask_copy(params, &idx,
1320                                        &tcp_mask->hdr.recv_ack,
1321                                        sizeof(tcp_mask->hdr.recv_ack));
1322                 ulp_rte_prsr_mask_copy(params, &idx,
1323                                        &tcp_mask->hdr.data_off,
1324                                        sizeof(tcp_mask->hdr.data_off));
1325                 ulp_rte_prsr_mask_copy(params, &idx,
1326                                        &tcp_mask->hdr.tcp_flags,
1327                                        sizeof(tcp_mask->hdr.tcp_flags));
1328                 ulp_rte_prsr_mask_copy(params, &idx,
1329                                        &tcp_mask->hdr.rx_win,
1330                                        sizeof(tcp_mask->hdr.rx_win));
1331                 ulp_rte_prsr_mask_copy(params, &idx,
1332                                        &tcp_mask->hdr.cksum,
1333                                        sizeof(tcp_mask->hdr.cksum));
1334                 ulp_rte_prsr_mask_copy(params, &idx,
1335                                        &tcp_mask->hdr.tcp_urp,
1336                                        sizeof(tcp_mask->hdr.tcp_urp));
1337         }
1338         /* add number of TCP header elements */
1339         params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1340
1341         /* Set the tcp header bitmap and computed l4 header bitmaps */
1342         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1343             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1344                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1345                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1346         } else {
1347                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1348                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1349         }
1350         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1351         return BNXT_TF_RC_SUCCESS;
1352 }
1353
1354 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1355 int32_t
1356 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1357                           struct ulp_rte_parser_params *params)
1358 {
1359         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1360         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1361         struct ulp_rte_hdr_field *field;
1362         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1363         uint32_t idx = params->field_idx;
1364         uint32_t size;
1365
1366         /*
1367          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1368          * header fields
1369          */
1370         if (vxlan_spec) {
1371                 size = sizeof(vxlan_spec->flags);
1372                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1373                                                 &vxlan_spec->flags,
1374                                                 size);
1375                 size = sizeof(vxlan_spec->rsvd0);
1376                 field = ulp_rte_parser_fld_copy(field,
1377                                                 &vxlan_spec->rsvd0,
1378                                                 size);
1379                 size = sizeof(vxlan_spec->vni);
1380                 field = ulp_rte_parser_fld_copy(field,
1381                                                 &vxlan_spec->vni,
1382                                                 size);
1383                 size = sizeof(vxlan_spec->rsvd1);
1384                 field = ulp_rte_parser_fld_copy(field,
1385                                                 &vxlan_spec->rsvd1,
1386                                                 size);
1387         }
1388         if (vxlan_mask) {
1389                 ulp_rte_prsr_mask_copy(params, &idx,
1390                                        &vxlan_mask->flags,
1391                                        sizeof(vxlan_mask->flags));
1392                 ulp_rte_prsr_mask_copy(params, &idx,
1393                                        &vxlan_mask->rsvd0,
1394                                        sizeof(vxlan_mask->rsvd0));
1395                 ulp_rte_prsr_mask_copy(params, &idx,
1396                                        &vxlan_mask->vni,
1397                                        sizeof(vxlan_mask->vni));
1398                 ulp_rte_prsr_mask_copy(params, &idx,
1399                                        &vxlan_mask->rsvd1,
1400                                        sizeof(vxlan_mask->rsvd1));
1401         }
1402         /* Add number of vxlan header elements */
1403         params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1404
1405         /* Update the hdr_bitmap with vxlan */
1406         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1407         return BNXT_TF_RC_SUCCESS;
1408 }
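
/*
 * Illustrative sketch (not compiled into the driver): spec/mask pair for
 * matching a specific VNI with the handler above. The VNI is three bytes
 * in network order, so 0x12 0x34 0x56 encodes VNI 0x123456. The
 * ULP_PARSER_EXAMPLES guard and the example_* names are hypothetical.
 */
#ifdef ULP_PARSER_EXAMPLES
static const struct rte_flow_item_vxlan example_vni_spec = {
        .vni = { 0x12, 0x34, 0x56 },
};
static const struct rte_flow_item_vxlan example_vni_mask = {
        .vni = { 0xff, 0xff, 0xff },
};
#endif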
1409
1410 /* Function to handle the parsing of RTE Flow item void Header */
1411 int32_t
1412 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1413                          struct ulp_rte_parser_params *params __rte_unused)
1414 {
1415         return BNXT_TF_RC_SUCCESS;
1416 }
1417
1418 /* Function to handle the parsing of RTE Flow action void Header. */
1419 int32_t
1420 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1421                          struct ulp_rte_parser_params *params __rte_unused)
1422 {
1423         return BNXT_TF_RC_SUCCESS;
1424 }
1425
1426 /* Function to handle the parsing of RTE Flow action Mark Header. */
1427 int32_t
1428 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1429                          struct ulp_rte_parser_params *param)
1430 {
1431         const struct rte_flow_action_mark *mark;
1432         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1433         uint32_t mark_id;
1434
1435         mark = action_item->conf;
1436         if (mark) {
1437                 mark_id = tfp_cpu_to_be_32(mark->id);
1438                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1439                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1440
1441                 /* Update the act_bitmap with mark */
1442                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1443                 return BNXT_TF_RC_SUCCESS;
1444         }
1445         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1446         return BNXT_TF_RC_ERROR;
1447 }
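
/*
 * Illustrative sketch (not compiled into the driver): a MARK action as an
 * application might build it; the handler above stores the id big-endian
 * in the action properties and sets the MARK action bit. The
 * ULP_PARSER_EXAMPLES guard and the example_* names are hypothetical.
 */
#ifdef ULP_PARSER_EXAMPLES
static const struct rte_flow_action_mark example_mark = { .id = 0x1234 };
static const struct rte_flow_action example_mark_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_mark },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif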
1448
1449 /* Function to handle the parsing of RTE Flow action RSS Header. */
1450 int32_t
1451 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1452                         struct ulp_rte_parser_params *param)
1453 {
1454         const struct rte_flow_action_rss *rss = action_item->conf;
1455
1456         if (rss) {
1457                 /* Update the act_bitmap with rss */
1458                 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1459                 return BNXT_TF_RC_SUCCESS;
1460         }
1461         BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1462         return BNXT_TF_RC_ERROR;
1463 }
1464
1465 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1466 int32_t
1467 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1468                                 struct ulp_rte_parser_params *params)
1469 {
1470         const struct rte_flow_action_vxlan_encap *vxlan_encap;
1471         const struct rte_flow_item *item;
1472         const struct rte_flow_item_eth *eth_spec;
1473         const struct rte_flow_item_ipv4 *ipv4_spec;
1474         const struct rte_flow_item_ipv6 *ipv6_spec;
1475         struct rte_flow_item_vxlan vxlan_spec;
1476         uint32_t vlan_num = 0, vlan_size = 0;
1477         uint32_t ip_size = 0, ip_type = 0;
1478         uint32_t vxlan_size = 0;
1479         uint8_t *buff;
1480         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1481         const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1482                                     0x00, 0x40, 0x11};
1483         /* IPv6 hdr per byte - vtc_flow(4), plen(2), next_hdr, hop_limit */
1484         const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1485                                 0x00, 0x11, 0xf6};
1486         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1487         struct ulp_rte_act_prop *ap = &params->act_prop;
1488         const uint8_t *tmp_buff;
1489
1490         vxlan_encap = action_item->conf;
1491         if (!vxlan_encap) {
1492                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1493                 return BNXT_TF_RC_ERROR;
1494         }
1495
1496         item = vxlan_encap->definition;
1497         if (!item) {
1498                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1499                 return BNXT_TF_RC_ERROR;
1500         }
1501
1502         if (!ulp_rte_item_skip_void(&item, 0))
1503                 return BNXT_TF_RC_ERROR;
1504
1505         /* must have ethernet header */
1506         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1507                 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1508                 return BNXT_TF_RC_ERROR;
1509         }
1510         eth_spec = item->spec;
1511         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1512         ulp_encap_buffer_copy(buff,
1513                               eth_spec->dst.addr_bytes,
1514                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1515                               ULP_BUFFER_ALIGN_8_BYTE);
1516
1517         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1518         ulp_encap_buffer_copy(buff,
1519                               eth_spec->src.addr_bytes,
1520                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1521                               ULP_BUFFER_ALIGN_8_BYTE);
1522
1523         /* Goto the next item */
1524         if (!ulp_rte_item_skip_void(&item, 1))
1525                 return BNXT_TF_RC_ERROR;
1526
1527         /* May have vlan header */
1528         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1529                 vlan_num++;
1530                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1531                 ulp_encap_buffer_copy(buff,
1532                                       item->spec,
1533                                       sizeof(struct rte_flow_item_vlan),
1534                                       ULP_BUFFER_ALIGN_8_BYTE);
1535
1536                 if (!ulp_rte_item_skip_void(&item, 1))
1537                         return BNXT_TF_RC_ERROR;
1538         }
1539
1540         /* may have two vlan headers */
1541         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1542                 vlan_num++;
1543                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1544                        sizeof(struct rte_flow_item_vlan)],
1545                        item->spec,
1546                        sizeof(struct rte_flow_item_vlan));
1547                 if (!ulp_rte_item_skip_void(&item, 1))
1548                         return BNXT_TF_RC_ERROR;
1549         }
1550         /* Update the vlan count and size if vlan headers are present */
1551         if (vlan_num) {
1552                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1553                 vlan_num = tfp_cpu_to_be_32(vlan_num);
1554                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1555                        &vlan_num,
1556                        sizeof(uint32_t));
1557                 vlan_size = tfp_cpu_to_be_32(vlan_size);
1558                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1559                        &vlan_size,
1560                        sizeof(uint32_t));
1561         }
1562
1563         /* L3 must be IPv4 or IPv6 */
1564         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1565                 ipv4_spec = item->spec;
1566                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1567
1568                 /* copy the ipv4 details */
1569                 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1570                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1571                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1572                         ulp_encap_buffer_copy(buff,
1573                                               def_ipv4_hdr,
1574                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1575                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1576                                               ULP_BUFFER_ALIGN_8_BYTE);
1577                 } else {
1578                         /* The total length is ignored in the ip hdr. */
1579                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1580                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1581                         ulp_encap_buffer_copy(buff,
1582                                               tmp_buff,
1583                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1584                                               ULP_BUFFER_ALIGN_8_BYTE);
1585                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1586                              BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1587                         ulp_encap_buffer_copy(buff,
1588                                               &ipv4_spec->hdr.version_ihl,
1589                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1590                                               ULP_BUFFER_ALIGN_8_BYTE);
1591                 }
1592
1593                 /* Update the dst ip address in ip encap buffer */
1594                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1595                     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1596                     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1597                 ulp_encap_buffer_copy(buff,
1598                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1599                                       sizeof(ipv4_spec->hdr.dst_addr),
1600                                       ULP_BUFFER_ALIGN_8_BYTE);
1601
1602                 /* Update the src ip address */
1603                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1604                         BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1605                         sizeof(ipv4_spec->hdr.src_addr)];
1606                 ulp_encap_buffer_copy(buff,
1607                                       (const uint8_t *)&ipv4_spec->hdr.src_addr,
1608                                       sizeof(ipv4_spec->hdr.src_addr),
1609                                       ULP_BUFFER_ALIGN_8_BYTE);
1610
1611                 /* Update the ip size details */
1612                 ip_size = tfp_cpu_to_be_32(ip_size);
1613                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1614                        &ip_size, sizeof(uint32_t));
1615
1616                 /* update the ip type */
1617                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1618                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1619                        &ip_type, sizeof(uint32_t));
1620
1621                 /* update the computed field to notify it is ipv4 header */
1622                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1623                                     1);
1624
1625                 if (!ulp_rte_item_skip_void(&item, 1))
1626                         return BNXT_TF_RC_ERROR;
1627         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1628                 ipv6_spec = item->spec;
1629                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1630
1631                 /* copy the ipv6 details */
1632                 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1633                 if (ulp_buffer_is_empty(tmp_buff,
1634                                         BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1635                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1636                         ulp_encap_buffer_copy(buff,
1637                                               def_ipv6_hdr,
1638                                               sizeof(def_ipv6_hdr),
1639                                               ULP_BUFFER_ALIGN_8_BYTE);
1640                 } else {
1641                         /* The payload length is ignored in the ip hdr. */
1642                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1643                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1644                         ulp_encap_buffer_copy(buff,
1645                                               tmp_buff,
1646                                               BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1647                                               ULP_BUFFER_ALIGN_8_BYTE);
1648                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1649                                 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1650                                 BNXT_ULP_ENCAP_IPV6_DO];
1651                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1652                         ulp_encap_buffer_copy(buff,
1653                                               tmp_buff,
1654                                               BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1655                                               ULP_BUFFER_ALIGN_8_BYTE);
1656                 }
1657                 /* Update the dst ip address in ip encap buffer */
1658                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1659                         sizeof(def_ipv6_hdr)];
1660                 ulp_encap_buffer_copy(buff,
1661                                       (const uint8_t *)ipv6_spec->hdr.dst_addr,
1662                                       sizeof(ipv6_spec->hdr.dst_addr),
1663                                       ULP_BUFFER_ALIGN_8_BYTE);
1664
1665                 /* Update the src ip address */
1666                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1667                 ulp_encap_buffer_copy(buff,
1668                                       (const uint8_t *)ipv6_spec->hdr.src_addr,
1669                                       sizeof(ipv6_spec->hdr.src_addr),
1670                                       ULP_BUFFER_ALIGN_16_BYTE);
1671
1672                 /* Update the ip size details */
1673                 ip_size = tfp_cpu_to_be_32(ip_size);
1674                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1675                        &ip_size, sizeof(uint32_t));
1676
1677                 /* update the ip type */
1678                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1679                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1680                        &ip_type, sizeof(uint32_t));
1681
1682                 /* update the computed field to notify it is ipv6 header */
1683                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1684                                     1);
1685
1686                 if (!ulp_rte_item_skip_void(&item, 1))
1687                         return BNXT_TF_RC_ERROR;
1688         } else {
1689                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1690                 return BNXT_TF_RC_ERROR;
1691         }
1692
1693         /* L4 is UDP */
1694         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1695                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1696                 return BNXT_TF_RC_ERROR;
1697         }
1698         /* copy the udp details */
1699         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1700                               item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1701                               ULP_BUFFER_ALIGN_8_BYTE);
1702
1703         if (!ulp_rte_item_skip_void(&item, 1))
1704                 return BNXT_TF_RC_ERROR;
1705
1706         /* Finally VXLAN */
1707         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1708                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1709                 return BNXT_TF_RC_ERROR;
1710         }
1711         vxlan_size = sizeof(struct rte_flow_item_vxlan);
1712         /* copy the vxlan details */
1713         memcpy(&vxlan_spec, item->spec, vxlan_size);
1714         vxlan_spec.flags = 0x08;
1715         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1716         if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1717                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1718                                       vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1719         } else {
1720                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1721                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1722                 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1723                                       (const uint8_t *)&vxlan_spec.vni,
1724                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1725         }
1726         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1727         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1728                &vxlan_size, sizeof(uint32_t));
1729
1730         /* Update the act_bitmap with vxlan encap */
1731         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1732         return BNXT_TF_RC_SUCCESS;
1733 }
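
/*
 * Illustrative sketch (not compiled into the driver): a minimal encap
 * definition in the order the handler above enforces - ETH, optional
 * VLAN(s), IPv4 or IPv6, UDP, VXLAN. The addresses, the ex_* names and the
 * ULP_PARSER_EXAMPLES guard are hypothetical.
 */
#ifdef ULP_PARSER_EXAMPLES
static const struct rte_flow_item_eth ex_enc_eth = {
        .dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
        .src.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
};
static const struct rte_flow_item_ipv4 ex_enc_ipv4 = {
        .hdr = {
                .src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
                .dst_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
        },
};
static const struct rte_flow_item_udp ex_enc_udp = {
        .hdr = { .dst_port = RTE_BE16(ULP_UDP_PORT_VXLAN) },
};
static const struct rte_flow_item_vxlan ex_enc_vxlan = {
        .vni = { 0x00, 0x00, 0x01 },
};
static struct rte_flow_item ex_enc_definition[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &ex_enc_eth },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ex_enc_ipv4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &ex_enc_udp },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &ex_enc_vxlan },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_vxlan_encap ex_vxlan_encap = {
        .definition = ex_enc_definition,
};
#endif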
1734
1735 /* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
1736 int32_t
1737 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1738                                 __rte_unused,
1739                                 struct ulp_rte_parser_params *params)
1740 {
1741         /* Update the act_bitmap with vxlan decap */
1742         ULP_BITMAP_SET(params->act_bitmap.bits,
1743                        BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1744         /* Update computational field with tunnel decap info */
1745         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1746         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1747         return BNXT_TF_RC_SUCCESS;
1748 }
1749
1750 /* Function to handle the parsing of RTE Flow action drop Header. */
1751 int32_t
1752 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1753                          struct ulp_rte_parser_params *params)
1754 {
1755         /* Update the act_bitmap with drop */
1756         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1757         return BNXT_TF_RC_SUCCESS;
1758 }
1759
1760 /* Function to handle the parsing of RTE Flow action count. */
1761 int32_t
1762 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1763                           struct ulp_rte_parser_params *params)
1764
1765 {
1766         const struct rte_flow_action_count *act_count;
1767         struct ulp_rte_act_prop *act_prop = &params->act_prop;
1768
1769         act_count = action_item->conf;
1770         if (act_count) {
1771                 if (act_count->shared) {
1772                         BNXT_TF_DBG(ERR,
1773                                     "Parse Error:Shared count not supported\n");
1774                         return BNXT_TF_RC_PARSE_ERR;
1775                 }
1776                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1777                        &act_count->id,
1778                        BNXT_ULP_ACT_PROP_SZ_COUNT);
1779         }
1780
1781         /* Update the act_bitmap with count */
1782         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1783         return BNXT_TF_RC_SUCCESS;
1784 }
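
/*
 * Illustrative sketch (not compiled into the driver): a COUNT action conf
 * as the handler above expects it; shared counters are rejected. The
 * ULP_PARSER_EXAMPLES guard and the ex_* name are hypothetical.
 */
#ifdef ULP_PARSER_EXAMPLES
static const struct rte_flow_action_count ex_count = {
        .shared = 0,    /* a non-zero 'shared' returns BNXT_TF_RC_PARSE_ERR */
        .id = 0,
};
#endif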
1785
1786 /* Function to handle the parsing of action ports. */
1787 static int32_t
1788 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1789                             uint32_t ifindex)
1790 {
1791         enum bnxt_ulp_direction_type dir;
1792         uint16_t pid_s;
1793         uint32_t pid;
1794         struct ulp_rte_act_prop *act = &param->act_prop;
1795         enum bnxt_ulp_intf_type port_type;
1796         uint32_t vnic_type;
1797
1798         /* Get the direction */
1799         dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1800         if (dir == BNXT_ULP_DIR_EGRESS) {
1801                 /* For egress direction, fill vport */
1802                 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1803                         return BNXT_TF_RC_ERROR;
1804
1805                 pid = pid_s;
1806                 pid = rte_cpu_to_be_32(pid);
1807                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1808                        &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1809         } else {
1810                 /* For ingress direction, fill vnic */
1811                 port_type = ULP_COMP_FLD_IDX_RD(param,
1812                                                 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1813                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1814                         vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1815                 else
1816                         vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1817
1818                 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1819                                                  vnic_type, &pid_s))
1820                         return BNXT_TF_RC_ERROR;
1821
1822                 pid = pid_s;
1823                 pid = rte_cpu_to_be_32(pid);
1824                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1825                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1826         }
1827
1828         /* Update the action port set bit */
1829         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1830         return BNXT_TF_RC_SUCCESS;
1831 }
1832
1833 /* Function to handle the parsing of RTE Flow action PF. */
1834 int32_t
1835 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1836                        struct ulp_rte_parser_params *params)
1837 {
1838         uint32_t port_id;
1839         uint32_t ifindex;
1840         enum bnxt_ulp_intf_type intf_type;
1841
1842         /* Get the port id of the current device */
1843         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1844
1845         /* Get the port db ifindex */
1846         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1847                                               &ifindex)) {
1848                 BNXT_TF_DBG(ERR, "Invalid port id\n");
1849                 return BNXT_TF_RC_ERROR;
1850         }
1851
1852         /* Check that the port is a PF port */
1853         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1854         if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1855                 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1856                 return BNXT_TF_RC_ERROR;
1857         }
1858         /* Update the action properties */
1859         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1860         return ulp_rte_parser_act_port_set(params, ifindex);
1861 }
1862
1863 /* Function to handle the parsing of RTE Flow action VF. */
1864 int32_t
1865 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1866                        struct ulp_rte_parser_params *params)
1867 {
1868         const struct rte_flow_action_vf *vf_action;
1869         uint32_t ifindex;
1870         enum bnxt_ulp_intf_type intf_type;
1871
1872         vf_action = action_item->conf;
1873         if (!vf_action) {
1874                 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1875                 return BNXT_TF_RC_PARSE_ERR;
1876         }
1877
1878         if (vf_action->original) {
1879                 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1880                 return BNXT_TF_RC_PARSE_ERR;
1881         }
1882
1883         /* Check that the port is a VF port */
1884         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1885                                                  &ifindex)) {
1886                 BNXT_TF_DBG(ERR, "VF is not a valid interface\n");
1887                 return BNXT_TF_RC_ERROR;
1888         }
1889         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1890         if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1891             intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1892                 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1893                 return BNXT_TF_RC_ERROR;
1894         }
1895
1896         /* Update the action properties */
1897         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1898         return ulp_rte_parser_act_port_set(params, ifindex);
1899 }
1900
1901 /* Function to handle the parsing of RTE Flow action port_id. */
1902 int32_t
1903 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1904                             struct ulp_rte_parser_params *param)
1905 {
1906         const struct rte_flow_action_port_id *port_id = act_item->conf;
1907         uint32_t ifindex;
1908         enum bnxt_ulp_intf_type intf_type;
1909
1910         if (!port_id) {
1911                 BNXT_TF_DBG(ERR,
1912                             "ParseErr: Invalid Argument\n");
1913                 return BNXT_TF_RC_PARSE_ERR;
1914         }
1915         if (port_id->original) {
1916                 BNXT_TF_DBG(ERR,
1917                             "ParseErr:Portid Original not supported\n");
1918                 return BNXT_TF_RC_PARSE_ERR;
1919         }
1920
1921         /* Get the port db ifindex */
1922         if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1923                                               &ifindex)) {
1924                 BNXT_TF_DBG(ERR, "Invalid port id\n");
1925                 return BNXT_TF_RC_ERROR;
1926         }
1927
1928         /* Get the intf type */
1929         intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1930         if (!intf_type) {
1931                 BNXT_TF_DBG(ERR, "Invalid port type\n");
1932                 return BNXT_TF_RC_ERROR;
1933         }
1934
1935         /* Set the action port */
1936         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1937         return ulp_rte_parser_act_port_set(param, ifindex);
1938 }
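
/*
 * Illustrative sketch (not compiled into the driver): a PORT_ID action
 * conf as the handler above expects it; the 'original' flag is rejected
 * and the id is resolved through the ULP port database. The
 * ULP_PARSER_EXAMPLES guard and the ex_* names are hypothetical.
 */
#ifdef ULP_PARSER_EXAMPLES
static const struct rte_flow_action_port_id ex_port_id = {
        .original = 0,
        .id = 1,        /* DPDK ethdev port id */
};
static const struct rte_flow_action ex_fwd_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &ex_port_id },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif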
1939
1940 /* Function to handle the parsing of RTE Flow action phy_port. */
1941 int32_t
1942 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1943                              struct ulp_rte_parser_params *prm)
1944 {
1945         const struct rte_flow_action_phy_port *phy_port;
1946         uint32_t pid;
1947         int32_t rc;
1948         uint16_t pid_s;
1949         enum bnxt_ulp_direction_type dir;
1950
1951         phy_port = action_item->conf;
1952         if (!phy_port) {
1953                 BNXT_TF_DBG(ERR,
1954                             "ParseErr: Invalid Argument\n");
1955                 return BNXT_TF_RC_PARSE_ERR;
1956         }
1957
1958         if (phy_port->original) {
1959                 BNXT_TF_DBG(ERR,
1960                             "Parse Err:Port Original not supported\n");
1961                 return BNXT_TF_RC_PARSE_ERR;
1962         }
1963         dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1964         if (dir != BNXT_ULP_DIR_EGRESS) {
1965                 BNXT_TF_DBG(ERR,
1966                             "Parse Err:Phy ports are valid only for egress\n");
1967                 return BNXT_TF_RC_PARSE_ERR;
1968         }
1969         /* Get the physical port details from port db */
1970         rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1971                                             &pid_s);
1972         if (rc) {
1973                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1974                 return -EINVAL;
1975         }
1976
1977         pid = pid_s;
1978         pid = rte_cpu_to_be_32(pid);
1979         memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1980                &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1981
1982         /* Update the action port set bit */
1983         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1984         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1985                             BNXT_ULP_INTF_TYPE_PHY_PORT);
1986         return BNXT_TF_RC_SUCCESS;
1987 }
1988
1989 /* Function to handle the parsing of RTE Flow action pop vlan. */
1990 int32_t
1991 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1992                                 struct ulp_rte_parser_params *params)
1993 {
1994         /* Update the act_bitmap with pop */
1995         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1996         return BNXT_TF_RC_SUCCESS;
1997 }
1998
1999 /* Function to handle the parsing of RTE Flow action push vlan. */
2000 int32_t
2001 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2002                                  struct ulp_rte_parser_params *params)
2003 {
2004         const struct rte_flow_action_of_push_vlan *push_vlan;
2005         uint16_t ethertype;
2006         struct ulp_rte_act_prop *act = &params->act_prop;
2007
2008         push_vlan = action_item->conf;
2009         if (push_vlan) {
2010                 ethertype = push_vlan->ethertype;
2011                 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2012                         BNXT_TF_DBG(ERR,
2013                                     "Parse Err: Ethertype not supported\n");
2014                         return BNXT_TF_RC_PARSE_ERR;
2015                 }
2016                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2017                        &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2018                 /* Update the act_bitmap with push vlan */
2019                 ULP_BITMAP_SET(params->act_bitmap.bits,
2020                                BNXT_ULP_ACTION_BIT_PUSH_VLAN);
2021                 return BNXT_TF_RC_SUCCESS;
2022         }
2023         BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2024         return BNXT_TF_RC_ERROR;
2025 }
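
/*
 * Illustrative sketch (not compiled into the driver): an OF_PUSH_VLAN conf;
 * only the 0x8100 TPID passes the ethertype check above. The
 * ULP_PARSER_EXAMPLES guard and the ex_* name are hypothetical.
 */
#ifdef ULP_PARSER_EXAMPLES
static const struct rte_flow_action_of_push_vlan ex_push_vlan = {
        .ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
};
#endif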
2026
2027 /* Function to handle the parsing of RTE Flow action set vlan id. */
2028 int32_t
2029 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2030                                     struct ulp_rte_parser_params *params)
2031 {
2032         const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2033         uint32_t vid;
2034         struct ulp_rte_act_prop *act = &params->act_prop;
2035
2036         vlan_vid = action_item->conf;
2037         if (vlan_vid && vlan_vid->vlan_vid) {
2038                 vid = vlan_vid->vlan_vid;
2039                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2040                        &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2041                 /* Update the act_bitmap with set vlan vid */
2042                 ULP_BITMAP_SET(params->act_bitmap.bits,
2043                                BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
2044                 return BNXT_TF_RC_SUCCESS;
2045         }
2046         BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2047         return BNXT_TF_RC_ERROR;
2048 }
2049
2050 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2051 int32_t
2052 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2053                                     struct ulp_rte_parser_params *params)
2054 {
2055         const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2056         uint8_t pcp;
2057         struct ulp_rte_act_prop *act = &params->act_prop;
2058
2059         vlan_pcp = action_item->conf;
2060         if (vlan_pcp) {
2061                 pcp = vlan_pcp->vlan_pcp;
2062                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2063                        &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2064                 /* Update the act_bitmap with set vlan pcp */
2065                 ULP_BITMAP_SET(params->act_bitmap.bits,
2066                                BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
2067                 return BNXT_TF_RC_SUCCESS;
2068         }
2069         BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2070         return BNXT_TF_RC_ERROR;
2071 }
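
/*
 * Illustrative sketch (not compiled into the driver): VID and PCP confs
 * for the two handlers above; note the VID handler treats a zero vlan_vid
 * as invalid. The ULP_PARSER_EXAMPLES guard and the ex_* names are
 * hypothetical.
 */
#ifdef ULP_PARSER_EXAMPLES
static const struct rte_flow_action_of_set_vlan_vid ex_set_vid = {
        .vlan_vid = RTE_BE16(100),
};
static const struct rte_flow_action_of_set_vlan_pcp ex_set_pcp = {
        .vlan_pcp = 3,
};
#endif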
2072
2073 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2074 int32_t
2075 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2076                                  struct ulp_rte_parser_params *params)
2077 {
2078         const struct rte_flow_action_set_ipv4 *set_ipv4;
2079         struct ulp_rte_act_prop *act = &params->act_prop;
2080
2081         set_ipv4 = action_item->conf;
2082         if (set_ipv4) {
2083                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2084                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2085                 /* Update the act_bitmap with set ipv4 src */
2086                 ULP_BITMAP_SET(params->act_bitmap.bits,
2087                                BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
2088                 return BNXT_TF_RC_SUCCESS;
2089         }
2090         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2091         return BNXT_TF_RC_ERROR;
2092 }
2093
2094 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2095 int32_t
2096 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2097                                  struct ulp_rte_parser_params *params)
2098 {
2099         const struct rte_flow_action_set_ipv4 *set_ipv4;
2100         struct ulp_rte_act_prop *act = &params->act_prop;
2101
2102         set_ipv4 = action_item->conf;
2103         if (set_ipv4) {
2104                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2105                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2106                 /* Update the act_bitmap with set ipv4 dst */
2107                 ULP_BITMAP_SET(params->act_bitmap.bits,
2108                                BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
2109                 return BNXT_TF_RC_SUCCESS;
2110         }
2111         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2112         return BNXT_TF_RC_ERROR;
2113 }
2114
2115 /* Function to handle the parsing of RTE Flow action set tp src.*/
2116 int32_t
2117 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2118                                struct ulp_rte_parser_params *params)
2119 {
2120         const struct rte_flow_action_set_tp *set_tp;
2121         struct ulp_rte_act_prop *act = &params->act_prop;
2122
2123         set_tp = action_item->conf;
2124         if (set_tp) {
2125                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2126                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2127                 /* Update the act_bitmap with set tp src */
2128                 ULP_BITMAP_SET(params->act_bitmap.bits,
2129                                BNXT_ULP_ACTION_BIT_SET_TP_SRC);
2130                 return BNXT_TF_RC_SUCCESS;
2131         }
2132
2133         BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2134         return BNXT_TF_RC_ERROR;
2135 }
2136
2137 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2138 int32_t
2139 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2140                                struct ulp_rte_parser_params *params)
2141 {
2142         const struct rte_flow_action_set_tp *set_tp;
2143         struct ulp_rte_act_prop *act = &params->act_prop;
2144
2145         set_tp = action_item->conf;
2146         if (set_tp) {
2147                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2148                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2149                 /* Update the act_bitmap with set tp dst */
2150                 ULP_BITMAP_SET(params->act_bitmap.bits,
2151                                BNXT_ULP_ACTION_BIT_SET_TP_DST);
2152                 return BNXT_TF_RC_SUCCESS;
2153         }
2154
2155         BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
2156         return BNXT_TF_RC_ERROR;
2157 }
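
/*
 * Illustrative sketch (not compiled into the driver): NAT-style confs
 * consumed by the set ipv4/tp handlers above; the address and port are
 * carried in network byte order. The ULP_PARSER_EXAMPLES guard and the
 * ex_* names are hypothetical.
 */
#ifdef ULP_PARSER_EXAMPLES
static const struct rte_flow_action_set_ipv4 ex_set_ipv4_dst = {
        .ipv4_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 10)),
};
static const struct rte_flow_action_set_tp ex_set_tp_dst = {
        .port = RTE_BE16(4790),
};
#endif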
2158
2159 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2160 int32_t
2161 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2162                             struct ulp_rte_parser_params *params)
2163 {
2164         /* Update the act_bitmap with dec ttl */
2165         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2166         return BNXT_TF_RC_SUCCESS;
2167 }
2168
2169 /* Function to handle the parsing of RTE Flow action JUMP */
2170 int32_t
2171 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2172                             struct ulp_rte_parser_params *params)
2173 {
2174         /* Update the act_bitmap with jump */
2175         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
2176         return BNXT_TF_RC_SUCCESS;
2177 }