net/bnxt: support wildcard pattern matching
[dpdk.git] drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
14 #include "tfp.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
18 #include "ulp_tun.h"
19
20 /* Local defines for the parsing functions */
21 #define ULP_VLAN_PRIORITY_SHIFT         13 /* First 3 bits */
22 #define ULP_VLAN_PRIORITY_MASK          0x700
23 #define ULP_VLAN_TAG_MASK               0xFFF /* Last 12 bits */
24 #define ULP_UDP_PORT_VXLAN              4789
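/*
 * Note: the 802.1Q TCI is laid out as PCP(3) | DEI(1) | VID(12), so the
 * priority is recovered with a right shift of 13 and the tag with a 12-bit
 * mask; ULP_UDP_PORT_VXLAN is the IANA-assigned VXLAN UDP port.
 */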
25
26 /* Utility function to skip the void items. */
27 static inline int32_t
28 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 {
30         if (!*item)
31                 return 0;
32         if (increment)
33                 (*item)++;
34         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
35                 (*item)++;
36         if (*item)
37                 return 1;
38         return 0;
39 }
40
41 /* Utility function to update the field_bitmap */
42 static void
43 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
44                                    uint32_t idx)
45 {
46         struct ulp_rte_hdr_field *field;
47
48         field = &params->hdr_field[idx];
49         if (ulp_bitmap_notzero(field->mask, field->size)) {
50                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
51                 /* Not exact match */
52                 if (!ulp_bitmap_is_ones(field->mask, field->size))
53                         ULP_COMP_FLD_IDX_WR(params,
54                                             BNXT_ULP_CF_IDX_WC_MATCH, 1);
55         } else {
56                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
57         }
58 }
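/*
 * Note: a mask that is non-zero but not all ones flags the flow as a
 * wildcard match via BNXT_ULP_CF_IDX_WC_MATCH; presumably the matcher uses
 * this to select a wildcard-capable template (assumption based on the
 * commit subject; the matcher itself is not part of this file).
 */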
59
60 /* Utility function to copy field spec items */
61 static struct ulp_rte_hdr_field *
62 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
63                         const void *buffer,
64                         uint32_t size)
65 {
66         field->size = size;
67         memcpy(field->spec, buffer, field->size);
68         field++;
69         return field;
70 }
71
72 /* Utility function to copy field masks items */
73 static void
74 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
75                        uint32_t *idx,
76                        const void *buffer,
77                        uint32_t size)
78 {
79         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
80
81         memcpy(field->mask, buffer, size);
82         ulp_rte_parser_field_bitmap_update(params, *idx);
83         *idx = *idx + 1;
84 }
85
86 /* Utility function to ignore field masks items */
87 static void
88 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
89                          uint32_t *idx,
90                          const void *buffer __rte_unused,
91                          uint32_t size __rte_unused)
92 {
93         *idx = *idx + 1;
94 }
95
96 /*
97  * Function to handle the parsing of RTE Flows and placing
98  * the RTE flow items into the ulp structures.
99  */
100 int32_t
101 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
102                               struct ulp_rte_parser_params *params)
103 {
104         const struct rte_flow_item *item = pattern;
105         struct bnxt_ulp_rte_hdr_info *hdr_info;
106
107         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
108
109         /* Set the computed flags for no vlan tags before parsing */
110         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
111         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
112
113         /* Parse all the items in the pattern */
114         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
115                 /* get the header information from the ulp_hdr_info table */
116                 hdr_info = &ulp_hdr_info[item->type];
117                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
118                         BNXT_TF_DBG(ERR,
119                                     "Truflow parser does not support type %d\n",
120                                     item->type);
121                         return BNXT_TF_RC_PARSE_ERR;
122                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
123                         /* call the registered callback handler */
124                         if (hdr_info->proto_hdr_func) {
125                                 if (hdr_info->proto_hdr_func(item, params) !=
126                                     BNXT_TF_RC_SUCCESS) {
127                                         return BNXT_TF_RC_ERROR;
128                                 }
129                         }
130                 }
131                 item++;
132         }
133         /* update the implied SVIF */
134         return ulp_rte_parser_implicit_match_port_process(params);
135 }
136
137 /*
138  * Function to handle the parsing of RTE Flows and placing
139  * the RTE flow actions into the ulp structures.
140  */
141 int32_t
142 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
143                               struct ulp_rte_parser_params *params)
144 {
145         const struct rte_flow_action *action_item = actions;
146         struct bnxt_ulp_rte_act_info *hdr_info;
147
148         /* Parse all the actions in the action list */
149         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
150                 /* get the action information from the ulp_act_info table */
151                 hdr_info = &ulp_act_info[action_item->type];
152                 if (hdr_info->act_type ==
153                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
154                         BNXT_TF_DBG(ERR,
155                                     "Truflow parser does not support act %u\n",
156                                     action_item->type);
157                         return BNXT_TF_RC_ERROR;
158                 } else if (hdr_info->act_type ==
159                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
160                         /* call the registered callback handler */
161                         if (hdr_info->proto_act_func) {
162                                 if (hdr_info->proto_act_func(action_item,
163                                                              params) !=
164                                     BNXT_TF_RC_SUCCESS) {
165                                         return BNXT_TF_RC_ERROR;
166                                 }
167                         }
168                 }
169                 action_item++;
170         }
171         /* update the implied port details */
172         ulp_rte_parser_implicit_act_port_process(params);
173         return BNXT_TF_RC_SUCCESS;
174 }
175
176 /*
177  * Function to handle the post processing of the computed
178  * fields for the interface.
179  */
180 static void
181 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
182 {
183         uint32_t ifindex;
184         uint16_t port_id, parif;
185         uint32_t mtype;
186         enum bnxt_ulp_direction_type dir;
187
188         /* get the direction details */
189         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
190
191         /* read the port id details */
192         port_id = ULP_COMP_FLD_IDX_RD(params,
193                                       BNXT_ULP_CF_IDX_INCOMING_IF);
194         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
195                                               port_id,
196                                               &ifindex)) {
197                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
198                 return;
199         }
200
201         if (dir == BNXT_ULP_DIR_INGRESS) {
202                 /* Set port PARIF */
203                 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
204                                           BNXT_ULP_PHY_PORT_PARIF, &parif)) {
205                         BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
206                         return;
207                 }
208                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
209                                     parif);
210         } else {
211                 /* Get the match port type */
212                 mtype = ULP_COMP_FLD_IDX_RD(params,
213                                             BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
214                 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
215                         ULP_COMP_FLD_IDX_WR(params,
216                                             BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
217                                             1);
218                         /* Set VF func PARIF */
219                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
220                                                   BNXT_ULP_VF_FUNC_PARIF,
221                                                   &parif)) {
222                                 BNXT_TF_DBG(ERR,
223                                             "ParseErr:ifindex is not valid\n");
224                                 return;
225                         }
226                         ULP_COMP_FLD_IDX_WR(params,
227                                             BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
228                                             parif);
229
230                 } else {
231                         /* Set DRV func PARIF */
232                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
233                                                   BNXT_ULP_DRV_FUNC_PARIF,
234                                                   &parif)) {
235                                 BNXT_TF_DBG(ERR,
236                                             "ParseErr:ifindex is not valid\n");
237                                 return;
238                         }
239                         ULP_COMP_FLD_IDX_WR(params,
240                                             BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
241                                             parif);
242                 }
243         }
244 }
245
246 static int32_t
247 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
248 {
249         enum bnxt_ulp_intf_type match_port_type, act_port_type;
250         enum bnxt_ulp_direction_type dir;
251         uint32_t act_port_set;
252
253         /* Get the computed details */
254         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
255         match_port_type = ULP_COMP_FLD_IDX_RD(params,
256                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
257         act_port_type = ULP_COMP_FLD_IDX_RD(params,
258                                             BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
259         act_port_set = ULP_COMP_FLD_IDX_RD(params,
260                                            BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
261
262         /* set the flow direction in the proto and action header */
263         if (dir == BNXT_ULP_DIR_EGRESS) {
264                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
265                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
266                 ULP_BITMAP_SET(params->act_bitmap.bits,
267                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
268         }
269
270         /* calculate the VF to VF flag */
271         if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
272             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
273                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
274
275         /* Update the decrement ttl computational fields */
276         if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
277                              BNXT_ULP_ACT_BIT_DEC_TTL)) {
278                 /*
279                  * If the vxlan header is present and the vxlan decap
280                  * action is not set, decrement the tunnel ttl.
281                  * GRE and NVGRE should be handled similarly in the future.
282                  */
283                 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
284                                       BNXT_ULP_HDR_BIT_T_VXLAN) &&
285                     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
286                                       BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
287                         ULP_COMP_FLD_IDX_WR(params,
288                                             BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
289                 } else {
290                         ULP_COMP_FLD_IDX_WR(params,
291                                             BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
292                 }
293         }
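        /*
         * Assumption: BNXT_ULP_CF_IDX_ACT_T_DEC_TTL selects the tunnel-ttl
         * variant of the action template while BNXT_ULP_CF_IDX_ACT_DEC_TTL
         * selects the plain one; the templates themselves are not in this
         * file.
         */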
294
295         /* Merge the hdr_fp_bit into the proto header bit */
296         params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
297
298         /* Update the comp fld fid */
299         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
300
301         /* Update the computed interface parameters */
302         bnxt_ulp_comp_fld_intf_update(params);
303
304         /* TBD: Handle the flow rejection scenarios */
305         return 0;
306 }
307
308 /*
309  * Function to handle the post processing of the parsing details
310  */
311 int32_t
312 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
313 {
314         ulp_post_process_normal_flow(params);
315         return ulp_post_process_tun_flow(params);
316 }
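/*
 * Sketch of the expected call order (assumption; the actual flow-create
 * path lives outside this file):
 *
 *     bnxt_ulp_rte_parser_hdr_parse(pattern, &params);
 *     bnxt_ulp_rte_parser_act_parse(actions, &params);
 *     bnxt_ulp_rte_parser_post_process(&params);
 *
 * i.e. headers first, then actions, then the post processing that derives
 * the computed fields used for template matching.
 */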
317
318 /*
319  * Function to compute the flow direction based on the match port details
320  */
321 static void
322 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
323 {
324         enum bnxt_ulp_intf_type match_port_type;
325
326         /* Get the match port type */
327         match_port_type = ULP_COMP_FLD_IDX_RD(params,
328                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
329
330         /* If ingress flow and match port is VF rep then dir is egress */
331         if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
332             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
333                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
334                                     BNXT_ULP_DIR_EGRESS);
335         } else {
336                 /* Assign the input direction */
337                 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
338                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
339                                             BNXT_ULP_DIR_INGRESS);
340                 else
341                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
342                                             BNXT_ULP_DIR_EGRESS);
343         }
344 }
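/*
 * Example: a flow created with the ingress attribute on a VF representor
 * port is programmed as an egress flow, since packets "received" on the
 * representor are in fact transmitted towards the VF.
 */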
345
346 /* Function to set the SVIF match field in the parser params. */
347 static int32_t
348 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
349                         uint32_t ifindex,
350                         uint16_t mask)
351 {
352         uint16_t svif;
353         enum bnxt_ulp_direction_type dir;
354         struct ulp_rte_hdr_field *hdr_field;
355         enum bnxt_ulp_svif_type svif_type;
356         enum bnxt_ulp_intf_type port_type;
357
358         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
359             BNXT_ULP_INVALID_SVIF_VAL) {
360                 BNXT_TF_DBG(ERR,
361                             "SVIF already set,multiple source not support'd\n");
362                 return BNXT_TF_RC_ERROR;
363         }
364
365         /* Get port type details */
366         port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
367         if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
368                 BNXT_TF_DBG(ERR, "Invalid port type\n");
369                 return BNXT_TF_RC_ERROR;
370         }
371
372         /* Update the match port type */
373         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
374
375         /* compute the direction */
376         bnxt_ulp_rte_parser_direction_compute(params);
377
378         /* Get the computed direction */
379         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
380         if (dir == BNXT_ULP_DIR_INGRESS) {
381                 svif_type = BNXT_ULP_PHY_PORT_SVIF;
382         } else {
383                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
384                         svif_type = BNXT_ULP_VF_FUNC_SVIF;
385                 else
386                         svif_type = BNXT_ULP_DRV_FUNC_SVIF;
387         }
388         ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
389                              &svif);
390         svif = rte_cpu_to_be_16(svif);
391         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
392         memcpy(hdr_field->spec, &svif, sizeof(svif));
393         memcpy(hdr_field->mask, &mask, sizeof(mask));
394         hdr_field->size = sizeof(svif);
395         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
396                             rte_be_to_cpu_16(svif));
397         return BNXT_TF_RC_SUCCESS;
398 }
399
400 /* Function to handle the parsing of the RTE port id */
401 int32_t
402 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
403 {
404         uint16_t port_id = 0;
405         uint16_t svif_mask = 0xFFFF;
406         uint32_t ifindex;
407         int32_t rc = BNXT_TF_RC_ERROR;
408
409         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
410             BNXT_ULP_INVALID_SVIF_VAL)
411                 return BNXT_TF_RC_SUCCESS;
412
413         /* SVIF not set. So get the port id */
414         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
415
416         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
417                                               port_id,
418                                               &ifindex)) {
419                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
420                 return rc;
421         }
422
423         /* Update the SVIF details */
424         rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
425         return rc;
426 }
427
428 /* Function to handle the implicit action port id */
429 int32_t
430 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
431 {
432         struct rte_flow_action action_item = {0};
433         struct rte_flow_action_port_id port_id = {0};
434
435         /* Read the action port set bit */
436         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
437                 /* Already set, so just exit */
438                 return BNXT_TF_RC_SUCCESS;
439         }
440         port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
441         action_item.conf = &port_id;
442
443         /* Update the action port based on incoming port */
444         ulp_rte_port_id_act_handler(&action_item, params);
445
446         /* Reset the action port set bit */
447         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
448         return BNXT_TF_RC_SUCCESS;
449 }
450
451 /* Function to handle the parsing of RTE Flow item PF Header. */
452 int32_t
453 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
454                        struct ulp_rte_parser_params *params)
455 {
456         uint16_t port_id = 0;
457         uint16_t svif_mask = 0xFFFF;
458         uint32_t ifindex;
459
460         /* Get the implicit port id */
461         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
462
463         /* perform the conversion from dpdk port to bnxt ifindex */
464         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
465                                               port_id,
466                                               &ifindex)) {
467                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
468                 return BNXT_TF_RC_ERROR;
469         }
470
471         /* Update the SVIF details */
472         return  ulp_rte_parser_svif_set(params, ifindex, svif_mask);
473 }
474
475 /* Function to handle the parsing of RTE Flow item VF Header. */
476 int32_t
477 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
478                        struct ulp_rte_parser_params *params)
479 {
480         const struct rte_flow_item_vf *vf_spec = item->spec;
481         const struct rte_flow_item_vf *vf_mask = item->mask;
482         uint16_t mask = 0;
483         uint32_t ifindex;
484         int32_t rc = BNXT_TF_RC_PARSE_ERR;
485
486         /* Get VF rte_flow_item for Port details */
487         if (!vf_spec) {
488                 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
489                 return rc;
490         }
491         if (!vf_mask) {
492                 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
493                 return rc;
494         }
495         mask = vf_mask->id;
496
497         /* perform the conversion from VF Func id to bnxt ifindex */
498         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
499                                                  vf_spec->id,
500                                                  &ifindex)) {
501                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
502                 return rc;
503         }
504         /* Update the SVIF details */
505         return ulp_rte_parser_svif_set(params, ifindex, mask);
506 }
507
508 /* Function to handle the parsing of RTE Flow item port id Header. */
509 int32_t
510 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
511                             struct ulp_rte_parser_params *params)
512 {
513         const struct rte_flow_item_port_id *port_spec = item->spec;
514         const struct rte_flow_item_port_id *port_mask = item->mask;
515         uint16_t mask = 0;
516         int32_t rc = BNXT_TF_RC_PARSE_ERR;
517         uint32_t ifindex;
518
519         if (!port_spec) {
520                 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
521                 return rc;
522         }
523         if (!port_mask) {
524                 BNXT_TF_DBG(ERR, "ParseErr:Port id mask is not valid\n");
525                 return rc;
526         }
527         mask = port_mask->id;
528
529         /* perform the conversion from dpdk port to bnxt ifindex */
530         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
531                                               port_spec->id,
532                                               &ifindex)) {
533                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
534                 return rc;
535         }
536         /* Update the SVIF details */
537         return ulp_rte_parser_svif_set(params, ifindex, mask);
538 }
539
540 /* Function to handle the parsing of RTE Flow item phy port Header. */
541 int32_t
542 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
543                              struct ulp_rte_parser_params *params)
544 {
545         const struct rte_flow_item_phy_port *port_spec = item->spec;
546         const struct rte_flow_item_phy_port *port_mask = item->mask;
547         uint16_t mask = 0;
548         int32_t rc = BNXT_TF_RC_ERROR;
549         uint16_t svif;
550         enum bnxt_ulp_direction_type dir;
551         struct ulp_rte_hdr_field *hdr_field;
552
553         /* Copy the rte_flow_item for phy port into hdr_field */
554         if (!port_spec) {
555                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
556                 return rc;
557         }
558         if (!port_mask) {
559                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
560                 return rc;
561         }
562         mask = port_mask->index;
563
564         /* Update the match port type */
565         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
566                             BNXT_ULP_INTF_TYPE_PHY_PORT);
567
568         /* Compute the Hw direction */
569         bnxt_ulp_rte_parser_direction_compute(params);
570
571         /* Direction validation */
572         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
573         if (dir == BNXT_ULP_DIR_EGRESS) {
574                 BNXT_TF_DBG(ERR,
575                             "Parse Err:Phy ports are valid only for ingress\n");
576                 return BNXT_TF_RC_PARSE_ERR;
577         }
578
579         /* Get the physical port details from port db */
580         rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
581                                            &svif);
582         if (rc) {
583                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
584                 return BNXT_TF_RC_PARSE_ERR;
585         }
586
587         /* Update the SVIF details */
588         svif = rte_cpu_to_be_16(svif);
589         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
590         memcpy(hdr_field->spec, &svif, sizeof(svif));
591         memcpy(hdr_field->mask, &mask, sizeof(mask));
592         hdr_field->size = sizeof(svif);
593         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
594                             rte_be_to_cpu_16(svif));
595         return BNXT_TF_RC_SUCCESS;
596 }
597
598 /* Function to handle the update of proto header based on field values */
599 static void
600 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
601                              uint16_t type, uint32_t in_flag)
602 {
603         if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
604                 if (in_flag) {
605                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
606                                        BNXT_ULP_HDR_BIT_I_IPV4);
607                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
608                 } else {
609                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
610                                        BNXT_ULP_HDR_BIT_O_IPV4);
611                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
612                 }
613         } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
614                 if (in_flag) {
615                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
616                                        BNXT_ULP_HDR_BIT_I_IPV6);
617                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
618                 } else {
619                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
620                                        BNXT_ULP_HDR_BIT_O_IPV6);
621                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
622                 }
623         }
624 }
625
626 /* Internal Function to identify broadcast or multicast packets */
627 static int32_t
628 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
629 {
630         if (rte_is_multicast_ether_addr(eth_addr) ||
631             rte_is_broadcast_ether_addr(eth_addr)) {
632                 BNXT_TF_DBG(DEBUG,
633                             "No support for bcast or mcast addr offload\n");
634                 return 1;
635         }
636         return 0;
637 }
638
639 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
640 int32_t
641 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
642                         struct ulp_rte_parser_params *params)
643 {
644         const struct rte_flow_item_eth *eth_spec = item->spec;
645         const struct rte_flow_item_eth *eth_mask = item->mask;
646         struct ulp_rte_hdr_field *field;
647         uint32_t idx = params->field_idx;
648         uint32_t size;
649         uint16_t eth_type = 0;
650         uint32_t inner_flag = 0;
651
652         /*
653          * Copy the rte_flow_item for eth into hdr_field using ethernet
654          * header fields
655          */
656         if (eth_spec) {
657                 size = sizeof(eth_spec->dst.addr_bytes);
658                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
659                                                 eth_spec->dst.addr_bytes,
660                                                 size);
661                 /* Todo: work around to avoid multicast and broadcast addr */
662                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
663                         return BNXT_TF_RC_PARSE_ERR;
664
665                 size = sizeof(eth_spec->src.addr_bytes);
666                 field = ulp_rte_parser_fld_copy(field,
667                                                 eth_spec->src.addr_bytes,
668                                                 size);
669                 /* Todo: work around to avoid multicast and broadcast addr */
670                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
671                         return BNXT_TF_RC_PARSE_ERR;
672
673                 field = ulp_rte_parser_fld_copy(field,
674                                                 &eth_spec->type,
675                                                 sizeof(eth_spec->type));
676                 eth_type = eth_spec->type;
677         }
678         if (eth_mask) {
679                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
680                                        sizeof(eth_mask->dst.addr_bytes));
681                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
682                                        sizeof(eth_mask->src.addr_bytes));
683                 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
684                                        sizeof(eth_mask->type));
685         }
686         /* Add number of Eth header elements */
687         params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
688
689         /* Update the protocol hdr bitmap */
690         if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
691                              BNXT_ULP_HDR_BIT_O_ETH) ||
692             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
693                              BNXT_ULP_HDR_BIT_O_IPV4) ||
694             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
695                              BNXT_ULP_HDR_BIT_O_IPV6) ||
696             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697                              BNXT_ULP_HDR_BIT_O_UDP) ||
698             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699                              BNXT_ULP_HDR_BIT_O_TCP)) {
700                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
701                 inner_flag = 1;
702         } else {
703                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
704         }
705         /* Update the field protocol hdr bitmap */
706         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
707
708         return BNXT_TF_RC_SUCCESS;
709 }
710
711 /* Function to handle the parsing of RTE Flow item Vlan Header. */
712 int32_t
713 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
714                          struct ulp_rte_parser_params *params)
715 {
716         const struct rte_flow_item_vlan *vlan_spec = item->spec;
717         const struct rte_flow_item_vlan *vlan_mask = item->mask;
718         struct ulp_rte_hdr_field *field;
719         struct ulp_rte_hdr_bitmap       *hdr_bit;
720         uint32_t idx = params->field_idx;
721         uint16_t vlan_tag, priority;
722         uint32_t outer_vtag_num;
723         uint32_t inner_vtag_num;
724         uint16_t eth_type = 0;
725         uint32_t inner_flag = 0;
726
727         /*
728          * Copy the rte_flow_item for vlan into hdr_field using Vlan
729          * header fields
730          */
731         if (vlan_spec) {
732                 vlan_tag = ntohs(vlan_spec->tci);
733                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
734                 vlan_tag &= ULP_VLAN_TAG_MASK;
735                 vlan_tag = htons(vlan_tag);
736
737                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
738                                                 &priority,
739                                                 sizeof(priority));
740                 field = ulp_rte_parser_fld_copy(field,
741                                                 &vlan_tag,
742                                                 sizeof(vlan_tag));
743                 field = ulp_rte_parser_fld_copy(field,
744                                                 &vlan_spec->inner_type,
745                                                 sizeof(vlan_spec->inner_type));
746                 eth_type = vlan_spec->inner_type;
747         }
748
749         if (vlan_mask) {
750                 vlan_tag = ntohs(vlan_mask->tci);
751                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
752                 vlan_tag &= 0xfff;
753
754                 /*
755                  * The storage for the priority and vlan tag masks is 2
756                  * bytes each. If the 3-bit priority mask is all 1's, set
757                  * the remaining 13 bits to 1's as well so the field is
758                  * treated as an exact match; widen the vlan tag mask too.
759                  */
760                 if (priority == ULP_VLAN_PRIORITY_MASK)
761                         priority |= ~ULP_VLAN_PRIORITY_MASK;
762                 if (vlan_tag == ULP_VLAN_TAG_MASK)
763                         vlan_tag |= ~ULP_VLAN_TAG_MASK;
764                 vlan_tag = htons(vlan_tag);
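                /*
                 * Example: a full 12-bit vid mask (0xfff) is widened to
                 * 0xffff here, so ulp_rte_parser_field_bitmap_update() sees
                 * an all-ones mask and does not flag the flow as a wildcard
                 * match; the priority mask gets the same treatment even
                 * though it is currently ignored below.
                 */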
765
766                 /*
767                  * The priority field is ignored since OVS sets it as a
768                  * wildcard match, which is not supported. This is a
769                  * workaround and shall be addressed in the future.
770                  */
771                 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
772                                          sizeof(priority));
773
774                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
775                                        sizeof(vlan_tag));
776                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
777                                        sizeof(vlan_mask->inner_type));
778         }
779         /* Set the field index to new incremented value */
780         params->field_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
781
782         /* Get the outer tag and inner tag counts */
783         outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
784                                              BNXT_ULP_CF_IDX_O_VTAG_NUM);
785         inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
786                                              BNXT_ULP_CF_IDX_I_VTAG_NUM);
787
788         /* Update the hdr_bitmap of the vlans */
789         hdr_bit = &params->hdr_bitmap;
790         if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
791             !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
792             !outer_vtag_num) {
793                 /* Update the vlan tag num */
794                 outer_vtag_num++;
795                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
796                                     outer_vtag_num);
797                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
798                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
799                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
800                                BNXT_ULP_HDR_BIT_OO_VLAN);
801         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
802                    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
803                    outer_vtag_num == 1) {
804                 /* update the vlan tag num */
805                 outer_vtag_num++;
806                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
807                                     outer_vtag_num);
808                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
809                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
810                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
811                                BNXT_ULP_HDR_BIT_OI_VLAN);
812         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
813                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
814                    !inner_vtag_num) {
815                 /* update the vlan tag num */
816                 inner_vtag_num++;
817                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
818                                     inner_vtag_num);
819                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
820                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
821                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
822                                BNXT_ULP_HDR_BIT_IO_VLAN);
823                 inner_flag = 1;
824         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
825                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
826                    inner_vtag_num == 1) {
827                 /* update the vlan tag num */
828                 inner_vtag_num++;
829                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
830                                     inner_vtag_num);
831                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
832                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
833                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
834                                BNXT_ULP_HDR_BIT_II_VLAN);
835                 inner_flag = 1;
836         } else {
837                 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
838                 return BNXT_TF_RC_ERROR;
839         }
840         /* Update the field protocol hdr bitmap */
841         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
842         return BNXT_TF_RC_SUCCESS;
843 }
844
845 /* Function to handle the update of proto header based on field values */
846 static void
847 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
848                              uint8_t proto, uint32_t in_flag)
849 {
850         if (proto == IPPROTO_UDP) {
851                 if (in_flag) {
852                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
853                                        BNXT_ULP_HDR_BIT_I_UDP);
854                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
855                 } else {
856                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
857                                        BNXT_ULP_HDR_BIT_O_UDP);
858                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
859                 }
860         } else if (proto == IPPROTO_TCP) {
861                 if (in_flag) {
862                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
863                                        BNXT_ULP_HDR_BIT_I_TCP);
864                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
865                 } else {
866                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
867                                        BNXT_ULP_HDR_BIT_O_TCP);
868                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
869                 }
870         }
871 }
872
873 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
874 int32_t
875 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
876                          struct ulp_rte_parser_params *params)
877 {
878         const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
879         const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
880         struct ulp_rte_hdr_field *field;
881         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
882         uint32_t idx = params->field_idx;
883         uint32_t size;
884         uint8_t proto = 0;
885         uint32_t inner_flag = 0;
886         uint32_t cnt;
887
888         /* validate there is no 3rd L3 header */
889         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
890         if (cnt == 2) {
891                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
892                 return BNXT_TF_RC_ERROR;
893         }
894
895         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
896                               BNXT_ULP_HDR_BIT_O_ETH) &&
897             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
898                               BNXT_ULP_HDR_BIT_I_ETH)) {
899                 /* Since an F2 flow does not include an eth item, when the
900                  * parser detects an IPv4/IPv6 item that belongs to the outer
901                  * header, i.e. o_ipv4/o_ipv6, check whether O_ETH and I_ETH
902                  * are set. If not, add an offset of sizeof(o_eth/oo_vlan/
903                  * oi_vlan) to the index. This allows the parser post
904                  * processor to update the t_dmac in hdr_field[o_eth.dmac].
905                  */
906                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
907                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
908                 params->field_idx = idx;
909         }
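        /*
         * i.e. the first BNXT_ULP_PROTO_HDR_ETH_NUM +
         * BNXT_ULP_PROTO_HDR_VLAN_NUM hdr_field slots are left unused so the
         * IPv4 fields land at the same indices they would have in a flow
         * that carries an explicit eth item.
         */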
910
911         /*
912          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
913          * header fields
914          */
915         if (ipv4_spec) {
916                 size = sizeof(ipv4_spec->hdr.version_ihl);
917                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
918                                                 &ipv4_spec->hdr.version_ihl,
919                                                 size);
920                 size = sizeof(ipv4_spec->hdr.type_of_service);
921                 field = ulp_rte_parser_fld_copy(field,
922                                                 &ipv4_spec->hdr.type_of_service,
923                                                 size);
924                 size = sizeof(ipv4_spec->hdr.total_length);
925                 field = ulp_rte_parser_fld_copy(field,
926                                                 &ipv4_spec->hdr.total_length,
927                                                 size);
928                 size = sizeof(ipv4_spec->hdr.packet_id);
929                 field = ulp_rte_parser_fld_copy(field,
930                                                 &ipv4_spec->hdr.packet_id,
931                                                 size);
932                 size = sizeof(ipv4_spec->hdr.fragment_offset);
933                 field = ulp_rte_parser_fld_copy(field,
934                                                 &ipv4_spec->hdr.fragment_offset,
935                                                 size);
936                 size = sizeof(ipv4_spec->hdr.time_to_live);
937                 field = ulp_rte_parser_fld_copy(field,
938                                                 &ipv4_spec->hdr.time_to_live,
939                                                 size);
940                 size = sizeof(ipv4_spec->hdr.next_proto_id);
941                 field = ulp_rte_parser_fld_copy(field,
942                                                 &ipv4_spec->hdr.next_proto_id,
943                                                 size);
944                 proto = ipv4_spec->hdr.next_proto_id;
945                 size = sizeof(ipv4_spec->hdr.hdr_checksum);
946                 field = ulp_rte_parser_fld_copy(field,
947                                                 &ipv4_spec->hdr.hdr_checksum,
948                                                 size);
949                 size = sizeof(ipv4_spec->hdr.src_addr);
950                 field = ulp_rte_parser_fld_copy(field,
951                                                 &ipv4_spec->hdr.src_addr,
952                                                 size);
953                 size = sizeof(ipv4_spec->hdr.dst_addr);
954                 field = ulp_rte_parser_fld_copy(field,
955                                                 &ipv4_spec->hdr.dst_addr,
956                                                 size);
957         }
958         if (ipv4_mask) {
959                 ulp_rte_prsr_mask_copy(params, &idx,
960                                        &ipv4_mask->hdr.version_ihl,
961                                        sizeof(ipv4_mask->hdr.version_ihl));
962                 /*
963                  * The tos field is ignored since OVS sets it as a wildcard
964                  * match, which is not supported. This is a workaround and
965                  * shall be addressed in the future.
966                  */
967                 ulp_rte_prsr_mask_ignore(params, &idx,
968                                          &ipv4_mask->hdr.type_of_service,
969                                          sizeof(ipv4_mask->hdr.type_of_service)
970                                          );
971
972                 ulp_rte_prsr_mask_copy(params, &idx,
973                                        &ipv4_mask->hdr.total_length,
974                                        sizeof(ipv4_mask->hdr.total_length));
975                 ulp_rte_prsr_mask_copy(params, &idx,
976                                        &ipv4_mask->hdr.packet_id,
977                                        sizeof(ipv4_mask->hdr.packet_id));
978                 ulp_rte_prsr_mask_copy(params, &idx,
979                                        &ipv4_mask->hdr.fragment_offset,
980                                        sizeof(ipv4_mask->hdr.fragment_offset));
981                 ulp_rte_prsr_mask_copy(params, &idx,
982                                        &ipv4_mask->hdr.time_to_live,
983                                        sizeof(ipv4_mask->hdr.time_to_live));
984                 ulp_rte_prsr_mask_copy(params, &idx,
985                                        &ipv4_mask->hdr.next_proto_id,
986                                        sizeof(ipv4_mask->hdr.next_proto_id));
987                 ulp_rte_prsr_mask_copy(params, &idx,
988                                        &ipv4_mask->hdr.hdr_checksum,
989                                        sizeof(ipv4_mask->hdr.hdr_checksum));
990                 ulp_rte_prsr_mask_copy(params, &idx,
991                                        &ipv4_mask->hdr.src_addr,
992                                        sizeof(ipv4_mask->hdr.src_addr));
993                 ulp_rte_prsr_mask_copy(params, &idx,
994                                        &ipv4_mask->hdr.dst_addr,
995                                        sizeof(ipv4_mask->hdr.dst_addr));
996         }
997         /* Add the number of ipv4 header elements */
998         params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
999
1000         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1001         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1002             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1003                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1004                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1005                 inner_flag = 1;
1006         } else {
1007                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1008                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1009         }
1010
1011         /* Update the field protocol hdr bitmap */
1012         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1013         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1014         return BNXT_TF_RC_SUCCESS;
1015 }
1016
1017 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1018 int32_t
1019 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1020                          struct ulp_rte_parser_params *params)
1021 {
1022         const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1023         const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1024         struct ulp_rte_hdr_field *field;
1025         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1026         uint32_t idx = params->field_idx;
1027         uint32_t size;
1028         uint32_t vtcf, vtcf_mask;
1029         uint8_t proto = 0;
1030         uint32_t inner_flag = 0;
1031         uint32_t cnt;
1032
1033         /* validate there is no 3rd L3 header */
1034         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1035         if (cnt == 2) {
1036                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1037                 return BNXT_TF_RC_ERROR;
1038         }
1039
1040         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1041                               BNXT_ULP_HDR_BIT_O_ETH) &&
1042             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1043                               BNXT_ULP_HDR_BIT_I_ETH)) {
1044                 /* Since an F2 flow does not include an eth item, when the
1045                  * parser detects an IPv4/IPv6 item that belongs to the outer
1046                  * header, i.e. o_ipv4/o_ipv6, check whether O_ETH and I_ETH
1047                  * are set. If not, add an offset of sizeof(o_eth/oo_vlan/
1048                  * oi_vlan) to the index. This allows the parser post
1049                  * processor to update the t_dmac in hdr_field[o_eth.dmac].
1050                  */
1051                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1052                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
1053                 params->field_idx = idx;
1054         }
1055
1056         /*
1057          * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1058          * header fields
1059          */
1060         if (ipv6_spec) {
1061                 size = sizeof(ipv6_spec->hdr.vtc_flow);
1062
1063                 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1064                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1065                                                 &vtcf,
1066                                                 size);
1067
1068                 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1069                 field = ulp_rte_parser_fld_copy(field,
1070                                                 &vtcf,
1071                                                 size);
1072
1073                 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1074                 field = ulp_rte_parser_fld_copy(field,
1075                                                 &vtcf,
1076                                                 size);
1077
1078                 size = sizeof(ipv6_spec->hdr.payload_len);
1079                 field = ulp_rte_parser_fld_copy(field,
1080                                                 &ipv6_spec->hdr.payload_len,
1081                                                 size);
1082                 size = sizeof(ipv6_spec->hdr.proto);
1083                 field = ulp_rte_parser_fld_copy(field,
1084                                                 &ipv6_spec->hdr.proto,
1085                                                 size);
1086                 proto = ipv6_spec->hdr.proto;
1087                 size = sizeof(ipv6_spec->hdr.hop_limits);
1088                 field = ulp_rte_parser_fld_copy(field,
1089                                                 &ipv6_spec->hdr.hop_limits,
1090                                                 size);
1091                 size = sizeof(ipv6_spec->hdr.src_addr);
1092                 field = ulp_rte_parser_fld_copy(field,
1093                                                 &ipv6_spec->hdr.src_addr,
1094                                                 size);
1095                 size = sizeof(ipv6_spec->hdr.dst_addr);
1096                 field = ulp_rte_parser_fld_copy(field,
1097                                                 &ipv6_spec->hdr.dst_addr,
1098                                                 size);
1099         }
1100         if (ipv6_mask) {
1101                 size = sizeof(ipv6_mask->hdr.vtc_flow);
1102
1103                 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1104                 ulp_rte_prsr_mask_copy(params, &idx,
1105                                        &vtcf_mask,
1106                                        size);
1107                 /*
1108                  * The TC and flow label fields are ignored since OVS sets
1109                  * them for matching, which is not supported.
1110                  * This is a workaround and
1111                  * shall be addressed in the future.
1112                  */
1113                 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1114                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1115                 vtcf_mask =
1116                         BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1117                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1118
1119                 ulp_rte_prsr_mask_copy(params, &idx,
1120                                        &ipv6_mask->hdr.payload_len,
1121                                        sizeof(ipv6_mask->hdr.payload_len));
1122                 ulp_rte_prsr_mask_copy(params, &idx,
1123                                        &ipv6_mask->hdr.proto,
1124                                        sizeof(ipv6_mask->hdr.proto));
1125                 ulp_rte_prsr_mask_copy(params, &idx,
1126                                        &ipv6_mask->hdr.hop_limits,
1127                                        sizeof(ipv6_mask->hdr.hop_limits));
1128                 ulp_rte_prsr_mask_copy(params, &idx,
1129                                        &ipv6_mask->hdr.src_addr,
1130                                        sizeof(ipv6_mask->hdr.src_addr));
1131                 ulp_rte_prsr_mask_copy(params, &idx,
1132                                        &ipv6_mask->hdr.dst_addr,
1133                                        sizeof(ipv6_mask->hdr.dst_addr));
1134         }
1135         /* add number of ipv6 header elements */
1136         params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1137
1138         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1139         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1140             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1141                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1142                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1143                 inner_flag = 1;
1144         } else {
1145                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1146                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1147         }
1148
1149         /* Update the field protocol hdr bitmap */
1150         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1151         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1152
1153         return BNXT_TF_RC_SUCCESS;
1154 }
1155
1156 /* Function to handle the update of proto header based on field values */
1157 static void
1158 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1159                              uint16_t dst_port)
1160 {
1161         if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1162                 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1163                                BNXT_ULP_HDR_BIT_T_VXLAN);
1164                 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1165         }
1166 }
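/*
 * Note: only the IANA-assigned VXLAN port (4789) is recognized here, so a
 * VXLAN tunnel configured on a non-standard UDP destination port would not
 * set the tunnel header bit.
 */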
1167
1168 /* Function to handle the parsing of RTE Flow item UDP Header. */
1169 int32_t
1170 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1171                         struct ulp_rte_parser_params *params)
1172 {
1173         const struct rte_flow_item_udp *udp_spec = item->spec;
1174         const struct rte_flow_item_udp *udp_mask = item->mask;
1175         struct ulp_rte_hdr_field *field;
1176         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1177         uint32_t idx = params->field_idx;
1178         uint32_t size;
1179         uint16_t dport = 0, sport = 0;
1180         uint32_t cnt;
1181
1182         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1183         if (cnt == 2) {
1184                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1185                 return BNXT_TF_RC_ERROR;
1186         }
1187
1188         /*
1189          * Copy the rte_flow_item for udp into hdr_field using udp
1190          * header fields
1191          */
1192         if (udp_spec) {
1193                 size = sizeof(udp_spec->hdr.src_port);
1194                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1195                                                 &udp_spec->hdr.src_port,
1196                                                 size);
1197                 sport = udp_spec->hdr.src_port;
1198                 size = sizeof(udp_spec->hdr.dst_port);
1199                 field = ulp_rte_parser_fld_copy(field,
1200                                                 &udp_spec->hdr.dst_port,
1201                                                 size);
1202                 dport = udp_spec->hdr.dst_port;
1203                 size = sizeof(udp_spec->hdr.dgram_len);
1204                 field = ulp_rte_parser_fld_copy(field,
1205                                                 &udp_spec->hdr.dgram_len,
1206                                                 size);
1207                 size = sizeof(udp_spec->hdr.dgram_cksum);
1208                 field = ulp_rte_parser_fld_copy(field,
1209                                                 &udp_spec->hdr.dgram_cksum,
1210                                                 size);
1211         }
1212         if (udp_mask) {
1213                 ulp_rte_prsr_mask_copy(params, &idx,
1214                                        &udp_mask->hdr.src_port,
1215                                        sizeof(udp_mask->hdr.src_port));
1216                 ulp_rte_prsr_mask_copy(params, &idx,
1217                                        &udp_mask->hdr.dst_port,
1218                                        sizeof(udp_mask->hdr.dst_port));
1219                 ulp_rte_prsr_mask_copy(params, &idx,
1220                                        &udp_mask->hdr.dgram_len,
1221                                        sizeof(udp_mask->hdr.dgram_len));
1222                 ulp_rte_prsr_mask_copy(params, &idx,
1223                                        &udp_mask->hdr.dgram_cksum,
1224                                        sizeof(udp_mask->hdr.dgram_cksum));
1225         }
1226
1227         /* Add number of UDP header elements */
1228         params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1229
1230         /* Set the udp header bitmap and computed l4 header bitmaps */
1231         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1232             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1233                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1234                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1235                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
1236                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
1237
1238         } else {
1239                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1240                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1241                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
1242                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
1243
1244                 /* Update the field protocol hdr bitmap */
1245                 ulp_rte_l4_proto_type_update(params, dport);
1246         }
1247         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1248         return BNXT_TF_RC_SUCCESS;
1249 }
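
/*
 * Example pattern, illustrative only and assuming the standard rte_flow
 * API: an ETH/IPV4/UDP pattern such as the one below is walked by the L2,
 * L3 and L4 handlers in this file, with the UDP spec/mask copied into
 * hdr_field as shown above.  udp_spec and udp_mask are hypothetical
 * application-side structures.
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */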
1250
1251 /* Function to handle the parsing of RTE Flow item TCP Header. */
1252 int32_t
1253 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1254                         struct ulp_rte_parser_params *params)
1255 {
1256         const struct rte_flow_item_tcp *tcp_spec = item->spec;
1257         const struct rte_flow_item_tcp *tcp_mask = item->mask;
1258         struct ulp_rte_hdr_field *field;
1259         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1260         uint32_t idx = params->field_idx;
1261         uint16_t dport = 0, sport = 0;
1262         uint32_t size;
1263         uint32_t cnt;
1264
1265         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1266         if (cnt == 2) {
1267                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1268                 return BNXT_TF_RC_ERROR;
1269         }
1270
1271         /*
1272          * Copy the rte_flow_item for tcp into hdr_field using tcp
1273          * header fields
1274          */
1275         if (tcp_spec) {
1276                 sport = tcp_spec->hdr.src_port;
1277                 size = sizeof(tcp_spec->hdr.src_port);
1278                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1279                                                 &tcp_spec->hdr.src_port,
1280                                                 size);
1281                 dport = tcp_spec->hdr.dst_port;
1282                 size = sizeof(tcp_spec->hdr.dst_port);
1283                 field = ulp_rte_parser_fld_copy(field,
1284                                                 &tcp_spec->hdr.dst_port,
1285                                                 size);
1286                 size = sizeof(tcp_spec->hdr.sent_seq);
1287                 field = ulp_rte_parser_fld_copy(field,
1288                                                 &tcp_spec->hdr.sent_seq,
1289                                                 size);
1290                 size = sizeof(tcp_spec->hdr.recv_ack);
1291                 field = ulp_rte_parser_fld_copy(field,
1292                                                 &tcp_spec->hdr.recv_ack,
1293                                                 size);
1294                 size = sizeof(tcp_spec->hdr.data_off);
1295                 field = ulp_rte_parser_fld_copy(field,
1296                                                 &tcp_spec->hdr.data_off,
1297                                                 size);
1298                 size = sizeof(tcp_spec->hdr.tcp_flags);
1299                 field = ulp_rte_parser_fld_copy(field,
1300                                                 &tcp_spec->hdr.tcp_flags,
1301                                                 size);
1302                 size = sizeof(tcp_spec->hdr.rx_win);
1303                 field = ulp_rte_parser_fld_copy(field,
1304                                                 &tcp_spec->hdr.rx_win,
1305                                                 size);
1306                 size = sizeof(tcp_spec->hdr.cksum);
1307                 field = ulp_rte_parser_fld_copy(field,
1308                                                 &tcp_spec->hdr.cksum,
1309                                                 size);
1310                 size = sizeof(tcp_spec->hdr.tcp_urp);
1311                 field = ulp_rte_parser_fld_copy(field,
1312                                                 &tcp_spec->hdr.tcp_urp,
1313                                                 size);
1314         } else {
1315                 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1316         }
1317
1318         if (tcp_mask) {
1319                 ulp_rte_prsr_mask_copy(params, &idx,
1320                                        &tcp_mask->hdr.src_port,
1321                                        sizeof(tcp_mask->hdr.src_port));
1322                 ulp_rte_prsr_mask_copy(params, &idx,
1323                                        &tcp_mask->hdr.dst_port,
1324                                        sizeof(tcp_mask->hdr.dst_port));
1325                 ulp_rte_prsr_mask_copy(params, &idx,
1326                                        &tcp_mask->hdr.sent_seq,
1327                                        sizeof(tcp_mask->hdr.sent_seq));
1328                 ulp_rte_prsr_mask_copy(params, &idx,
1329                                        &tcp_mask->hdr.recv_ack,
1330                                        sizeof(tcp_mask->hdr.recv_ack));
1331                 ulp_rte_prsr_mask_copy(params, &idx,
1332                                        &tcp_mask->hdr.data_off,
1333                                        sizeof(tcp_mask->hdr.data_off));
1334                 ulp_rte_prsr_mask_copy(params, &idx,
1335                                        &tcp_mask->hdr.tcp_flags,
1336                                        sizeof(tcp_mask->hdr.tcp_flags));
1337                 ulp_rte_prsr_mask_copy(params, &idx,
1338                                        &tcp_mask->hdr.rx_win,
1339                                        sizeof(tcp_mask->hdr.rx_win));
1340                 ulp_rte_prsr_mask_copy(params, &idx,
1341                                        &tcp_mask->hdr.cksum,
1342                                        sizeof(tcp_mask->hdr.cksum));
1343                 ulp_rte_prsr_mask_copy(params, &idx,
1344                                        &tcp_mask->hdr.tcp_urp,
1345                                        sizeof(tcp_mask->hdr.tcp_urp));
1346         }
1347         /* add number of TCP header elements */
1348         params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1349
1350         /* Set the tcp header bitmap and computed l4 header bitmaps */
1351         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1352             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1353                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1354                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1355                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SPORT, sport);
1356                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DPORT, dport);
1357         } else {
1358                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1359                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1360                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SPORT, sport);
1361                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DPORT, dport);
1362         }
1363         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1364         return BNXT_TF_RC_SUCCESS;
1365 }
1366
1367 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1368 int32_t
1369 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1370                           struct ulp_rte_parser_params *params)
1371 {
1372         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1373         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1374         struct ulp_rte_hdr_field *field;
1375         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1376         uint32_t idx = params->field_idx;
1377         uint32_t size;
1378
1379         /*
1380          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1381          * header fields
1382          */
1383         if (vxlan_spec) {
1384                 size = sizeof(vxlan_spec->flags);
1385                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1386                                                 &vxlan_spec->flags,
1387                                                 size);
1388                 size = sizeof(vxlan_spec->rsvd0);
1389                 field = ulp_rte_parser_fld_copy(field,
1390                                                 &vxlan_spec->rsvd0,
1391                                                 size);
1392                 size = sizeof(vxlan_spec->vni);
1393                 field = ulp_rte_parser_fld_copy(field,
1394                                                 &vxlan_spec->vni,
1395                                                 size);
1396                 size = sizeof(vxlan_spec->rsvd1);
1397                 field = ulp_rte_parser_fld_copy(field,
1398                                                 &vxlan_spec->rsvd1,
1399                                                 size);
1400         }
1401         if (vxlan_mask) {
1402                 ulp_rte_prsr_mask_copy(params, &idx,
1403                                        &vxlan_mask->flags,
1404                                        sizeof(vxlan_mask->flags));
1405                 ulp_rte_prsr_mask_copy(params, &idx,
1406                                        &vxlan_mask->rsvd0,
1407                                        sizeof(vxlan_mask->rsvd0));
1408                 ulp_rte_prsr_mask_copy(params, &idx,
1409                                        &vxlan_mask->vni,
1410                                        sizeof(vxlan_mask->vni));
1411                 ulp_rte_prsr_mask_copy(params, &idx,
1412                                        &vxlan_mask->rsvd1,
1413                                        sizeof(vxlan_mask->rsvd1));
1414         }
1415         /* Add number of vxlan header elements */
1416         params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1417
1418         /* Update the hdr_bitmap with vxlan */
1419         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1420         return BNXT_TF_RC_SUCCESS;
1421 }
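
/*
 * Illustrative sketch, assuming the standard rte_flow API: a VXLAN item
 * matching VNI 0x000123 that this handler would copy into hdr_field.  The
 * vni field is a 3-byte array carried most-significant byte first.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x01, 0x23 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xff, 0xff, 0xff },
 *	};
 */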
1422
1423 /* Function to handle the parsing of RTE Flow item void Header */
1424 int32_t
1425 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1426                          struct ulp_rte_parser_params *params __rte_unused)
1427 {
1428         return BNXT_TF_RC_SUCCESS;
1429 }
1430
1431 /* Function to handle the parsing of RTE Flow action void Header. */
1432 int32_t
1433 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1434                          struct ulp_rte_parser_params *params __rte_unused)
1435 {
1436         return BNXT_TF_RC_SUCCESS;
1437 }
1438
1439 /* Function to handle the parsing of RTE Flow action Mark Header. */
1440 int32_t
1441 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1442                          struct ulp_rte_parser_params *param)
1443 {
1444         const struct rte_flow_action_mark *mark;
1445         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1446         uint32_t mark_id;
1447
1448         mark = action_item->conf;
1449         if (mark) {
1450                 mark_id = tfp_cpu_to_be_32(mark->id);
1451                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1452                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1453
1454                 /* Update the act_bitmap with mark */
1455                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1456                 return BNXT_TF_RC_SUCCESS;
1457         }
1458         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1459         return BNXT_TF_RC_ERROR;
1460 }
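
/*
 * Illustrative sketch, assuming the standard rte_flow API: a MARK action
 * as parsed above.  The id is stored big-endian in the action property
 * table, hence the tfp_cpu_to_be_32() conversion of mark->id.
 *
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */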
1461
1462 /* Function to handle the parsing of RTE Flow action RSS Header. */
1463 int32_t
1464 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1465                         struct ulp_rte_parser_params *param)
1466 {
1467         const struct rte_flow_action_rss *rss = action_item->conf;
1468
1469         if (rss) {
1470                 /* Update the act_bitmap with rss */
1471                 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1472                 return BNXT_TF_RC_SUCCESS;
1473         }
1474         BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1475         return BNXT_TF_RC_ERROR;
1476 }
1477
1478 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1479 int32_t
1480 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1481                                 struct ulp_rte_parser_params *params)
1482 {
1483         const struct rte_flow_action_vxlan_encap *vxlan_encap;
1484         const struct rte_flow_item *item;
1485         const struct rte_flow_item_eth *eth_spec;
1486         const struct rte_flow_item_ipv4 *ipv4_spec;
1487         const struct rte_flow_item_ipv6 *ipv6_spec;
1488         struct rte_flow_item_vxlan vxlan_spec;
1489         uint32_t vlan_num = 0, vlan_size = 0;
1490         uint32_t ip_size = 0, ip_type = 0;
1491         uint32_t vxlan_size = 0;
1492         uint8_t *buff;
1493         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1494         const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1495                                     0x00, 0x40, 0x11};
1496         /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
1497         const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1498                                 0x00, 0x11, 0xf6};
1499         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1500         struct ulp_rte_act_prop *ap = &params->act_prop;
1501         const uint8_t *tmp_buff;
1502
1503         vxlan_encap = action_item->conf;
1504         if (!vxlan_encap) {
1505                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1506                 return BNXT_TF_RC_ERROR;
1507         }
1508
1509         item = vxlan_encap->definition;
1510         if (!item) {
1511                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1512                 return BNXT_TF_RC_ERROR;
1513         }
1514
1515         if (!ulp_rte_item_skip_void(&item, 0))
1516                 return BNXT_TF_RC_ERROR;
1517
1518         /* must have ethernet header */
1519         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1520                 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1521                 return BNXT_TF_RC_ERROR;
1522         }
1523         eth_spec = item->spec;
1524         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1525         ulp_encap_buffer_copy(buff,
1526                               eth_spec->dst.addr_bytes,
1527                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1528                               ULP_BUFFER_ALIGN_8_BYTE);
1529
1530         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1531         ulp_encap_buffer_copy(buff,
1532                               eth_spec->src.addr_bytes,
1533                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1534                               ULP_BUFFER_ALIGN_8_BYTE);
1535
1536         /* Goto the next item */
1537         if (!ulp_rte_item_skip_void(&item, 1))
1538                 return BNXT_TF_RC_ERROR;
1539
1540         /* May have vlan header */
1541         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1542                 vlan_num++;
1543                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1544                 ulp_encap_buffer_copy(buff,
1545                                       item->spec,
1546                                       sizeof(struct rte_flow_item_vlan),
1547                                       ULP_BUFFER_ALIGN_8_BYTE);
1548
1549                 if (!ulp_rte_item_skip_void(&item, 1))
1550                         return BNXT_TF_RC_ERROR;
1551         }
1552
1553         /* may have two vlan headers */
1554         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1555                 vlan_num++;
1556                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1557                        sizeof(struct rte_flow_item_vlan)],
1558                        item->spec,
1559                        sizeof(struct rte_flow_item_vlan));
1560                 if (!ulp_rte_item_skip_void(&item, 1))
1561                         return BNXT_TF_RC_ERROR;
1562         }
1563         /* Update the vlan count and size if one or more vlan headers are present */
1564         if (vlan_num) {
1565                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1566                 vlan_num = tfp_cpu_to_be_32(vlan_num);
1567                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1568                        &vlan_num,
1569                        sizeof(uint32_t));
1570                 vlan_size = tfp_cpu_to_be_32(vlan_size);
1571                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1572                        &vlan_size,
1573                        sizeof(uint32_t));
1574         }
1575
1576         /* L3 must be IPv4 or IPv6 */
1577         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1578                 ipv4_spec = item->spec;
1579                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1580
1581                 /* copy the ipv4 details */
1582                 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1583                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1584                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1585                         ulp_encap_buffer_copy(buff,
1586                                               def_ipv4_hdr,
1587                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1588                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1589                                               ULP_BUFFER_ALIGN_8_BYTE);
1590                 } else {
1591                         /* Total length being ignored in the ip hdr. */
1592                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1593                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1594                         ulp_encap_buffer_copy(buff,
1595                                               tmp_buff,
1596                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1597                                               ULP_BUFFER_ALIGN_8_BYTE);
1598                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1599                              BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1600                         ulp_encap_buffer_copy(buff,
1601                                               &ipv4_spec->hdr.version_ihl,
1602                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1603                                               ULP_BUFFER_ALIGN_8_BYTE);
1604                 }
1605
1606                 /* Update the dst ip address in ip encap buffer */
1607                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1608                     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1609                     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1610                 ulp_encap_buffer_copy(buff,
1611                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1612                                       sizeof(ipv4_spec->hdr.dst_addr),
1613                                       ULP_BUFFER_ALIGN_8_BYTE);
1614
1615                 /* Update the src ip address */
1616                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1617                         BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1618                         sizeof(ipv4_spec->hdr.src_addr)];
1619                 ulp_encap_buffer_copy(buff,
1620                                       (const uint8_t *)&ipv4_spec->hdr.src_addr,
1621                                       sizeof(ipv4_spec->hdr.src_addr),
1622                                       ULP_BUFFER_ALIGN_8_BYTE);
1623
1624                 /* Update the ip size details */
1625                 ip_size = tfp_cpu_to_be_32(ip_size);
1626                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1627                        &ip_size, sizeof(uint32_t));
1628
1629                 /* update the ip type */
1630                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1631                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1632                        &ip_type, sizeof(uint32_t));
1633
1634                 /* update the computed field to notify it is ipv4 header */
1635                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1636                                     1);
1637
1638                 if (!ulp_rte_item_skip_void(&item, 1))
1639                         return BNXT_TF_RC_ERROR;
1640         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1641                 ipv6_spec = item->spec;
1642                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1643
1644                 /* copy the ipv6 details */
1645                 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1646                 if (ulp_buffer_is_empty(tmp_buff,
1647                                         BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1648                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1649                         ulp_encap_buffer_copy(buff,
1650                                               def_ipv6_hdr,
1651                                               sizeof(def_ipv6_hdr),
1652                                               ULP_BUFFER_ALIGN_8_BYTE);
1653                 } else {
1654                         /* The payload length being ignored in the ip hdr. */
1655                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1656                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1657                         ulp_encap_buffer_copy(buff,
1658                                               tmp_buff,
1659                                               BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1660                                               ULP_BUFFER_ALIGN_8_BYTE);
1661                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1662                                 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1663                                 BNXT_ULP_ENCAP_IPV6_DO];
1664                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1665                         ulp_encap_buffer_copy(buff,
1666                                               tmp_buff,
1667                                               BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1668                                               ULP_BUFFER_ALIGN_8_BYTE);
1669                 }
1670                 /* Update the dst ip address in ip encap buffer */
1671                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1672                         sizeof(def_ipv6_hdr)];
1673                 ulp_encap_buffer_copy(buff,
1674                                       (const uint8_t *)ipv6_spec->hdr.dst_addr,
1675                                       sizeof(ipv6_spec->hdr.dst_addr),
1676                                       ULP_BUFFER_ALIGN_8_BYTE);
1677
1678                 /* Update the src ip address */
1679                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1680                 ulp_encap_buffer_copy(buff,
1681                                       (const uint8_t *)ipv6_spec->hdr.src_addr,
1682                                       sizeof(ipv6_spec->hdr.src_addr),
1683                                       ULP_BUFFER_ALIGN_16_BYTE);
1684
1685                 /* Update the ip size details */
1686                 ip_size = tfp_cpu_to_be_32(ip_size);
1687                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1688                        &ip_size, sizeof(uint32_t));
1689
1690                 /* update the ip type */
1691                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1692                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1693                        &ip_type, sizeof(uint32_t));
1694
1695                 /* update the computed field to notify it is ipv6 header */
1696                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1697                                     1);
1698
1699                 if (!ulp_rte_item_skip_void(&item, 1))
1700                         return BNXT_TF_RC_ERROR;
1701         } else {
1702                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1703                 return BNXT_TF_RC_ERROR;
1704         }
1705
1706         /* L4 is UDP */
1707         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1708                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1709                 return BNXT_TF_RC_ERROR;
1710         }
1711         /* copy the udp details */
1712         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1713                               item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1714                               ULP_BUFFER_ALIGN_8_BYTE);
1715
1716         if (!ulp_rte_item_skip_void(&item, 1))
1717                 return BNXT_TF_RC_ERROR;
1718
1719         /* Finally VXLAN */
1720         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1721                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1722                 return BNXT_TF_RC_ERROR;
1723         }
1724         vxlan_size = sizeof(struct rte_flow_item_vxlan);
1725         /* copy the vxlan details */
1726         memcpy(&vxlan_spec, item->spec, vxlan_size);
1727         vxlan_spec.flags = 0x08;
1728         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1729         if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1730                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1731                                       vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1732         } else {
1733                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1734                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1735                 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1736                                       (const uint8_t *)&vxlan_spec.vni,
1737                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1738         }
1739         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1740         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1741                &vxlan_size, sizeof(uint32_t));
1742
1743         /* update the act_bitmap with vxlan encap */
1744         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
1745         return BNXT_TF_RC_SUCCESS;
1746 }
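
/*
 * Illustrative sketch, assuming the standard rte_flow API: the encap
 * definition walked above is an item list of ETH / optional VLAN(s) /
 * IPV4 or IPV6 / UDP / VXLAN terminated by END.  enc_eth, enc_ipv4,
 * enc_udp and enc_vxlan are hypothetical application-side spec structures.
 *
 *	struct rte_flow_item encap_items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &enc_eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &enc_ipv4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &enc_udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &enc_vxlan },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vxlan_encap encap = {
 *		.definition = encap_items,
 *	};
 */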
1747
1748 /* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
1749 int32_t
1750 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1751                                 __rte_unused,
1752                                 struct ulp_rte_parser_params *params)
1753 {
1754         /* update the act_bitmap with vxlan decap */
1755         ULP_BITMAP_SET(params->act_bitmap.bits,
1756                        BNXT_ULP_ACT_BIT_VXLAN_DECAP);
1757         /* Update computational field with tunnel decap info */
1758         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1759         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1760         return BNXT_TF_RC_SUCCESS;
1761 }
1762
1763 /* Function to handle the parsing of RTE Flow action drop Header. */
1764 int32_t
1765 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1766                          struct ulp_rte_parser_params *params)
1767 {
1768         /* Update the act_bitmap with drop */
1769         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
1770         return BNXT_TF_RC_SUCCESS;
1771 }
1772
1773 /* Function to handle the parsing of RTE Flow action count. */
1774 int32_t
1775 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1776                           struct ulp_rte_parser_params *params)
1777
1778 {
1779         const struct rte_flow_action_count *act_count;
1780         struct ulp_rte_act_prop *act_prop = &params->act_prop;
1781
1782         act_count = action_item->conf;
1783         if (act_count) {
1784                 if (act_count->shared) {
1785                         BNXT_TF_DBG(ERR,
1786                                     "Parse Error:Shared count not supported\n");
1787                         return BNXT_TF_RC_PARSE_ERR;
1788                 }
1789                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1790                        &act_count->id,
1791                        BNXT_ULP_ACT_PROP_SZ_COUNT);
1792         }
1793
1794         /* Update the act_bitmap with count */
1795         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
1796         return BNXT_TF_RC_SUCCESS;
1797 }
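
/*
 * Illustrative sketch, assuming the rte_flow API of this DPDK generation:
 * a per-flow COUNT action; shared counters are rejected by the handler
 * above.
 *
 *	struct rte_flow_action_count cnt = { .shared = 0, .id = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &cnt },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */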
1798
1799 /* Function to handle the parsing of action ports. */
1800 static int32_t
1801 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1802                             uint32_t ifindex)
1803 {
1804         enum bnxt_ulp_direction_type dir;
1805         uint16_t pid_s;
1806         uint32_t pid;
1807         struct ulp_rte_act_prop *act = &param->act_prop;
1808         enum bnxt_ulp_intf_type port_type;
1809         uint32_t vnic_type;
1810
1811         /* Get the direction */
1812         dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1813         if (dir == BNXT_ULP_DIR_EGRESS) {
1814                 /* For egress direction, fill vport */
1815                 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1816                         return BNXT_TF_RC_ERROR;
1817
1818                 pid = pid_s;
1819                 pid = rte_cpu_to_be_32(pid);
1820                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1821                        &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1822         } else {
1823                 /* For ingress direction, fill vnic */
1824                 port_type = ULP_COMP_FLD_IDX_RD(param,
1825                                                 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1826                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1827                         vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1828                 else
1829                         vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1830
1831                 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1832                                                  vnic_type, &pid_s))
1833                         return BNXT_TF_RC_ERROR;
1834
1835                 pid = pid_s;
1836                 pid = rte_cpu_to_be_32(pid);
1837                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1838                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1839         }
1840
1841         /* Update the action port set bit */
1842         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1843         return BNXT_TF_RC_SUCCESS;
1844 }
1845
1846 /* Function to handle the parsing of RTE Flow action PF. */
1847 int32_t
1848 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1849                        struct ulp_rte_parser_params *params)
1850 {
1851         uint32_t port_id;
1852         uint32_t ifindex;
1853         enum bnxt_ulp_intf_type intf_type;
1854
1855         /* Get the port id of the current device */
1856         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1857
1858         /* Get the port db ifindex */
1859         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1860                                               &ifindex)) {
1861                 BNXT_TF_DBG(ERR, "Invalid port id\n");
1862                 return BNXT_TF_RC_ERROR;
1863         }
1864
1865         /* Check the port is PF port */
1866         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1867         if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1868                 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1869                 return BNXT_TF_RC_ERROR;
1870         }
1871         /* Update the action properties */
1872         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1873         return ulp_rte_parser_act_port_set(params, ifindex);
1874 }
1875
1876 /* Function to handle the parsing of RTE Flow action VF. */
1877 int32_t
1878 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1879                        struct ulp_rte_parser_params *params)
1880 {
1881         const struct rte_flow_action_vf *vf_action;
1882         uint32_t ifindex;
1883         enum bnxt_ulp_intf_type intf_type;
1884
1885         vf_action = action_item->conf;
1886         if (!vf_action) {
1887                 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1888                 return BNXT_TF_RC_PARSE_ERR;
1889         }
1890
1891         if (vf_action->original) {
1892                 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1893                 return BNXT_TF_RC_PARSE_ERR;
1894         }
1895
1896         /* Check the port is VF port */
1897         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1898                                                  &ifindex)) {
1899                 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1900                 return BNXT_TF_RC_ERROR;
1901         }
1902         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1903         if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1904             intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1905                 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1906                 return BNXT_TF_RC_ERROR;
1907         }
1908
1909         /* Update the action properties */
1910         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1911         return ulp_rte_parser_act_port_set(params, ifindex);
1912 }
1913
1914 /* Function to handle the parsing of RTE Flow action port_id. */
1915 int32_t
1916 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1917                             struct ulp_rte_parser_params *param)
1918 {
1919         const struct rte_flow_action_port_id *port_id = act_item->conf;
1920         uint32_t ifindex;
1921         enum bnxt_ulp_intf_type intf_type;
1922
1923         if (!port_id) {
1924                 BNXT_TF_DBG(ERR,
1925                             "ParseErr: Invalid Argument\n");
1926                 return BNXT_TF_RC_PARSE_ERR;
1927         }
1928         if (port_id->original) {
1929                 BNXT_TF_DBG(ERR,
1930                             "ParseErr:Portid Original not supported\n");
1931                 return BNXT_TF_RC_PARSE_ERR;
1932         }
1933
1934         /* Get the port db ifindex */
1935         if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1936                                               &ifindex)) {
1937                 BNXT_TF_DBG(ERR, "Invalid port id\n");
1938                 return BNXT_TF_RC_ERROR;
1939         }
1940
1941         /* Get the intf type */
1942         intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1943         if (!intf_type) {
1944                 BNXT_TF_DBG(ERR, "Invalid port type\n");
1945                 return BNXT_TF_RC_ERROR;
1946         }
1947
1948         /* Set the action port */
1949         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1950         return ulp_rte_parser_act_port_set(param, ifindex);
1951 }
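
/*
 * Illustrative sketch, assuming the standard rte_flow API: a PORT_ID
 * action redirecting matched packets to DPDK port 1; 'original' must be
 * zero, as enforced above.
 *
 *	struct rte_flow_action_port_id pid_conf = {
 *		.original = 0,
 *		.id = 1,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */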
1952
1953 /* Function to handle the parsing of RTE Flow action phy_port. */
1954 int32_t
1955 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1956                              struct ulp_rte_parser_params *prm)
1957 {
1958         const struct rte_flow_action_phy_port *phy_port;
1959         uint32_t pid;
1960         int32_t rc;
1961         uint16_t pid_s;
1962         enum bnxt_ulp_direction_type dir;
1963
1964         phy_port = action_item->conf;
1965         if (!phy_port) {
1966                 BNXT_TF_DBG(ERR,
1967                             "ParseErr: Invalid Argument\n");
1968                 return BNXT_TF_RC_PARSE_ERR;
1969         }
1970
1971         if (phy_port->original) {
1972                 BNXT_TF_DBG(ERR,
1973                             "Parse Err:Port Original not supported\n");
1974                 return BNXT_TF_RC_PARSE_ERR;
1975         }
1976         dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1977         if (dir != BNXT_ULP_DIR_EGRESS) {
1978                 BNXT_TF_DBG(ERR,
1979                             "Parse Err:Phy ports are valid only for egress\n");
1980                 return BNXT_TF_RC_PARSE_ERR;
1981         }
1982         /* Get the physical port details from port db */
1983         rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1984                                             &pid_s);
1985         if (rc) {
1986                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1987                 return -EINVAL;
1988         }
1989
1990         pid = pid_s;
1991         pid = rte_cpu_to_be_32(pid);
1992         memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1993                &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1994
1995         /* Update the action port set bit */
1996         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1997         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1998                             BNXT_ULP_INTF_TYPE_PHY_PORT);
1999         return BNXT_TF_RC_SUCCESS;
2000 }
2001
2002 /* Function to handle the parsing of RTE Flow action pop vlan. */
2003 int32_t
2004 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2005                                 struct ulp_rte_parser_params *params)
2006 {
2007         /* Update the act_bitmap with pop */
2008         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2009         return BNXT_TF_RC_SUCCESS;
2010 }
2011
2012 /* Function to handle the parsing of RTE Flow action push vlan. */
2013 int32_t
2014 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2015                                  struct ulp_rte_parser_params *params)
2016 {
2017         const struct rte_flow_action_of_push_vlan *push_vlan;
2018         uint16_t ethertype;
2019         struct ulp_rte_act_prop *act = &params->act_prop;
2020
2021         push_vlan = action_item->conf;
2022         if (push_vlan) {
2023                 ethertype = push_vlan->ethertype;
2024                 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2025                         BNXT_TF_DBG(ERR,
2026                                     "Parse Err: Ethertype not supported\n");
2027                         return BNXT_TF_RC_PARSE_ERR;
2028                 }
2029                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2030                        &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2031                 /* Update the act_bitmap with push vlan */
2032                 ULP_BITMAP_SET(params->act_bitmap.bits,
2033                                BNXT_ULP_ACT_BIT_PUSH_VLAN);
2034                 return BNXT_TF_RC_SUCCESS;
2035         }
2036         BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2037         return BNXT_TF_RC_ERROR;
2038 }
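
/*
 * Illustrative sketch, assuming the standard rte_flow API: pushing a VLAN
 * tag typically pairs OF_PUSH_VLAN with OF_SET_VLAN_VID (and optionally
 * OF_SET_VLAN_PCP); ethertype and vid are supplied in network byte order.
 *
 *	struct rte_flow_action_of_push_vlan push = {
 *		.ethertype = RTE_BE16(RTE_ETHER_TYPE_VLAN),
 *	};
 *	struct rte_flow_action_of_set_vlan_vid vid = {
 *		.vlan_vid = RTE_BE16(100),
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN, .conf = &push },
 *		{ .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID, .conf = &vid },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */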
2039
2040 /* Function to handle the parsing of RTE Flow action set vlan id. */
2041 int32_t
2042 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2043                                     struct ulp_rte_parser_params *params)
2044 {
2045         const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2046         uint32_t vid;
2047         struct ulp_rte_act_prop *act = &params->act_prop;
2048
2049         vlan_vid = action_item->conf;
2050         if (vlan_vid && vlan_vid->vlan_vid) {
2051                 vid = vlan_vid->vlan_vid;
2052                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2053                        &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2054                 /* Update the act_bitmap with vlan vid */
2055                 ULP_BITMAP_SET(params->act_bitmap.bits,
2056                                BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2057                 return BNXT_TF_RC_SUCCESS;
2058         }
2059         BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2060         return BNXT_TF_RC_ERROR;
2061 }
2062
2063 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2064 int32_t
2065 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2066                                     struct ulp_rte_parser_params *params)
2067 {
2068         const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2069         uint8_t pcp;
2070         struct ulp_rte_act_prop *act = &params->act_prop;
2071
2072         vlan_pcp = action_item->conf;
2073         if (vlan_pcp) {
2074                 pcp = vlan_pcp->vlan_pcp;
2075                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2076                        &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2077                 /* Update the act_bitmap with vlan pcp */
2078                 ULP_BITMAP_SET(params->act_bitmap.bits,
2079                                BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2080                 return BNXT_TF_RC_SUCCESS;
2081         }
2082         BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2083         return BNXT_TF_RC_ERROR;
2084 }
2085
2086 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2087 int32_t
2088 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2089                                  struct ulp_rte_parser_params *params)
2090 {
2091         const struct rte_flow_action_set_ipv4 *set_ipv4;
2092         struct ulp_rte_act_prop *act = &params->act_prop;
2093
2094         set_ipv4 = action_item->conf;
2095         if (set_ipv4) {
2096                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2097                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2098                 /* Update the act_bitmap with set ipv4 src */
2099                 ULP_BITMAP_SET(params->act_bitmap.bits,
2100                                BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2101                 return BNXT_TF_RC_SUCCESS;
2102         }
2103         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2104         return BNXT_TF_RC_ERROR;
2105 }
2106
2107 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2108 int32_t
2109 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2110                                  struct ulp_rte_parser_params *params)
2111 {
2112         const struct rte_flow_action_set_ipv4 *set_ipv4;
2113         struct ulp_rte_act_prop *act = &params->act_prop;
2114
2115         set_ipv4 = action_item->conf;
2116         if (set_ipv4) {
2117                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2118                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2119                 /* Update the act_bitmap with set ipv4 dst */
2120                 ULP_BITMAP_SET(params->act_bitmap.bits,
2121                                BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2122                 return BNXT_TF_RC_SUCCESS;
2123         }
2124         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2125         return BNXT_TF_RC_ERROR;
2126 }
2127
2128 /* Function to handle the parsing of RTE Flow action set tp src.*/
2129 int32_t
2130 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2131                                struct ulp_rte_parser_params *params)
2132 {
2133         const struct rte_flow_action_set_tp *set_tp;
2134         struct ulp_rte_act_prop *act = &params->act_prop;
2135
2136         set_tp = action_item->conf;
2137         if (set_tp) {
2138                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2139                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2140                 /* Update the act_bitmap with set tp src */
2141                 ULP_BITMAP_SET(params->act_bitmap.bits,
2142                                BNXT_ULP_ACT_BIT_SET_TP_SRC);
2143                 return BNXT_TF_RC_SUCCESS;
2144         }
2145
2146         BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2147         return BNXT_TF_RC_ERROR;
2148 }
2149
2150 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2151 int32_t
2152 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2153                                struct ulp_rte_parser_params *params)
2154 {
2155         const struct rte_flow_action_set_tp *set_tp;
2156         struct ulp_rte_act_prop *act = &params->act_prop;
2157
2158         set_tp = action_item->conf;
2159         if (set_tp) {
2160                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2161                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2162                 /* Update the act_bitmap with set tp dst */
2163                 ULP_BITMAP_SET(params->act_bitmap.bits,
2164                                BNXT_ULP_ACT_BIT_SET_TP_DST);
2165                 return BNXT_TF_RC_SUCCESS;
2166         }
2167
2168         BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
2169         return BNXT_TF_RC_ERROR;
2170 }
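
/*
 * Illustrative sketch, assuming the standard rte_flow API: a DNAT-style
 * action list rewriting the destination IPv4 address and L4 port; both
 * values are supplied in network byte order.
 *
 *	struct rte_flow_action_set_ipv4 new_dip = {
 *		.ipv4_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 2)),
 *	};
 *	struct rte_flow_action_set_tp new_dport = {
 *		.port = RTE_BE16(8080),
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST, .conf = &new_dip },
 *		{ .type = RTE_FLOW_ACTION_TYPE_SET_TP_DST, .conf = &new_dport },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */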
2171
2172 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2173 int32_t
2174 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2175                             struct ulp_rte_parser_params *params)
2176 {
2177         /* Update the act_bitmap with dec ttl */
2178         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2179         return BNXT_TF_RC_SUCCESS;
2180 }
2181
2182 /* Function to handle the parsing of RTE Flow action JUMP */
2183 int32_t
2184 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2185                             struct ulp_rte_parser_params *params)
2186 {
2187         /* Update the act_bitmap with jump */
2188         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2189         return BNXT_TF_RC_SUCCESS;
2190 }
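
/*
 * Illustrative sketch, assuming the standard rte_flow API: a JUMP action
 * redirecting matched packets to another flow group.
 *
 *	struct rte_flow_action_jump jump = { .group = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */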