drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
14 #include "tfp.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
18 #include "ulp_tun.h"
19
20 /* Local defines for the parsing functions */
21 #define ULP_VLAN_PRIORITY_SHIFT         13 /* First 3 bits */
22 #define ULP_VLAN_PRIORITY_MASK          0x700
23 #define ULP_VLAN_TAG_MASK               0xFFF /* Last 12 bits */
24 #define ULP_UDP_PORT_VXLAN              4789
25
26 /* Utility function to skip the void items. */
27 static inline int32_t
28 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 {
30         if (!*item)
31                 return 0;
32         if (increment)
33                 (*item)++;
34         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
35                 (*item)++;
36         if (*item)
37                 return 1;
38         return 0;
39 }
40
41 /* Utility function to update the field_bitmap */
42 static void
43 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
44                                    uint32_t idx)
45 {
46         struct ulp_rte_hdr_field *field;
47
48         field = &params->hdr_field[idx];
49         if (ulp_bitmap_notzero(field->mask, field->size)) {
50                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
51                 /* Not exact match */
52                 if (!ulp_bitmap_is_ones(field->mask, field->size))
53                         ULP_BITMAP_SET(params->fld_bitmap.bits,
54                                        BNXT_ULP_MATCH_TYPE_BITMASK_WM);
55         } else {
56                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
57         }
58 }
59
60 /* Utility function to copy field spec items */
61 static struct ulp_rte_hdr_field *
62 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
63                         const void *buffer,
64                         uint32_t size)
65 {
66         field->size = size;
67         memcpy(field->spec, buffer, field->size);
68         field++;
69         return field;
70 }
71
72 /* Utility function to copy field mask items */
73 static void
74 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
75                        uint32_t *idx,
76                        const void *buffer,
77                        uint32_t size)
78 {
79         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
80
81         memcpy(field->mask, buffer, size);
82         ulp_rte_parser_field_bitmap_update(params, *idx);
83         *idx = *idx + 1;
84 }
85
86 /* Utility function to ignore field mask items */
87 static void
88 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
89                          uint32_t *idx,
90                          const void *buffer __rte_unused,
91                          uint32_t size __rte_unused)
92 {
93         *idx = *idx + 1;
94 }
95
96 /*
97  * Function to handle the parsing of RTE Flows and placing
98  * the RTE flow items into the ulp structures.
99  */
100 int32_t
101 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
102                               struct ulp_rte_parser_params *params)
103 {
104         const struct rte_flow_item *item = pattern;
105         struct bnxt_ulp_rte_hdr_info *hdr_info;
106
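        /* Reserve the initial hdr_field entries for the implicit SVIF match */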
107         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
108
109         /* Set the computed flags for no vlan tags before parsing */
110         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
111         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
112
113         /* Parse all the items in the pattern */
114         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
115                 /* get the header information from the ulp_hdr_info table */
116                 hdr_info = &ulp_hdr_info[item->type];
117                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
118                         BNXT_TF_DBG(ERR,
119                                     "Truflow parser does not support type %d\n",
120                                     item->type);
121                         return BNXT_TF_RC_PARSE_ERR;
122                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
123                         /* call the registered callback handler */
124                         if (hdr_info->proto_hdr_func) {
125                                 if (hdr_info->proto_hdr_func(item, params) !=
126                                     BNXT_TF_RC_SUCCESS) {
127                                         return BNXT_TF_RC_ERROR;
128                                 }
129                         }
130                 }
131                 item++;
132         }
133         /* update the implied SVIF */
134         return ulp_rte_parser_implicit_match_port_process(params);
135 }
136
137 /*
138  * Function to handle the parsing of RTE Flows and placing
139  * the RTE flow actions into the ulp structures.
140  */
141 int32_t
142 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
143                               struct ulp_rte_parser_params *params)
144 {
145         const struct rte_flow_action *action_item = actions;
146         struct bnxt_ulp_rte_act_info *hdr_info;
147
148         /* Parse all the actions in the action list */
149         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
150                 /* get the action information from the ulp_act_info table */
151                 hdr_info = &ulp_act_info[action_item->type];
152                 if (hdr_info->act_type ==
153                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
154                         BNXT_TF_DBG(ERR,
155                                     "Truflow parser does not support act %u\n",
156                                     action_item->type);
157                         return BNXT_TF_RC_ERROR;
158                 } else if (hdr_info->act_type ==
159                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
160                         /* call the registered callback handler */
161                         if (hdr_info->proto_act_func) {
162                                 if (hdr_info->proto_act_func(action_item,
163                                                              params) !=
164                                     BNXT_TF_RC_SUCCESS) {
165                                         return BNXT_TF_RC_ERROR;
166                                 }
167                         }
168                 }
169                 action_item++;
170         }
171         /* update the implied port details */
172         ulp_rte_parser_implicit_act_port_process(params);
173         return BNXT_TF_RC_SUCCESS;
174 }
175
176 /*
177  * Function to handle the post processing of the computed
178  * fields for the interface.
179  */
180 static void
181 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
182 {
183         uint32_t ifindex;
184         uint16_t port_id, parif;
185         uint32_t mtype;
186         enum bnxt_ulp_direction_type dir;
187
188         /* get the direction details */
189         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
190
191         /* read the port id details */
192         port_id = ULP_COMP_FLD_IDX_RD(params,
193                                       BNXT_ULP_CF_IDX_INCOMING_IF);
194         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
195                                               port_id,
196                                               &ifindex)) {
197                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
198                 return;
199         }
200
201         if (dir == BNXT_ULP_DIR_INGRESS) {
202                 /* Set port PARIF */
203                 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
204                                           BNXT_ULP_PHY_PORT_PARIF, &parif)) {
205                         BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
206                         return;
207                 }
208                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
209                                     parif);
210         } else {
211                 /* Get the match port type */
212                 mtype = ULP_COMP_FLD_IDX_RD(params,
213                                             BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
214                 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
215                         ULP_COMP_FLD_IDX_WR(params,
216                                             BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
217                                             1);
218                         /* Set VF func PARIF */
219                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
220                                                   BNXT_ULP_VF_FUNC_PARIF,
221                                                   &parif)) {
222                                 BNXT_TF_DBG(ERR,
223                                             "ParseErr:ifindex is not valid\n");
224                                 return;
225                         }
226                         ULP_COMP_FLD_IDX_WR(params,
227                                             BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
228                                             parif);
229
230                         /* populate the loopback parif */
231                         ULP_COMP_FLD_IDX_WR(params,
232                                             BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
233                                             BNXT_ULP_SYM_VF_FUNC_PARIF);
234
235                 } else {
236                         /* Set DRV func PARIF */
237                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
238                                                   BNXT_ULP_DRV_FUNC_PARIF,
239                                                   &parif)) {
240                                 BNXT_TF_DBG(ERR,
241                                             "ParseErr:ifindex is not valid\n");
242                                 return;
243                         }
244                         ULP_COMP_FLD_IDX_WR(params,
245                                             BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
246                                             parif);
247                 }
248         }
249 }
250
251 static int32_t
252 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
253 {
254         enum bnxt_ulp_intf_type match_port_type, act_port_type;
255         enum bnxt_ulp_direction_type dir;
256         uint32_t act_port_set;
257
258         /* Get the computed details */
259         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
260         match_port_type = ULP_COMP_FLD_IDX_RD(params,
261                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
262         act_port_type = ULP_COMP_FLD_IDX_RD(params,
263                                             BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
264         act_port_set = ULP_COMP_FLD_IDX_RD(params,
265                                            BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
266
267         /* set the flow direction in the proto and action header */
268         if (dir == BNXT_ULP_DIR_EGRESS) {
269                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
270                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
271                 ULP_BITMAP_SET(params->act_bitmap.bits,
272                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
273         }
274
275         /* calculate the VF to VF flag */
276         if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
277             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
278                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
279
280         /* Update the decrement ttl computational fields */
281         if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
282                              BNXT_ULP_ACTION_BIT_DEC_TTL)) {
283                 /*
284                  * If the vxlan header is present and the vxlan decap
285                  * action is not set, then decrement the tunnel ttl.
286                  * GRE and NVGRE should be handled similarly in the future.
287                  */
288                 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
289                                       BNXT_ULP_HDR_BIT_T_VXLAN) &&
290                     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
291                                       BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
292                         ULP_COMP_FLD_IDX_WR(params,
293                                             BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
294                 } else {
295                         ULP_COMP_FLD_IDX_WR(params,
296                                             BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
297                 }
298         }
299
300         /* Merge the hdr_fp_bit into the proto header bit */
301         params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
302
303         /* Update the computed interface parameters */
304         bnxt_ulp_comp_fld_intf_update(params);
305
306         /* TBD: Handle the flow rejection scenarios */
307         return 0;
308 }
309
310 /*
311  * Function to handle the post processing of the parsing details
312  */
313 int32_t
314 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
315 {
316         ulp_post_process_normal_flow(params);
317         return ulp_post_process_tun_flow(params);
318 }
319
320 /*
321  * Function to compute the flow direction based on the match port details
322  */
323 static void
324 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
325 {
326         enum bnxt_ulp_intf_type match_port_type;
327
328         /* Get the match port type */
329         match_port_type = ULP_COMP_FLD_IDX_RD(params,
330                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
331
332         /* If ingress flow and match port is a VF rep then dir is egress */
333         if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
334             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
335                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
336                                     BNXT_ULP_DIR_EGRESS);
337         } else {
338                 /* Assign the input direction */
339                 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
340                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
341                                             BNXT_ULP_DIR_INGRESS);
342                 else
343                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
344                                             BNXT_ULP_DIR_EGRESS);
345         }
346 }
347
348 /* Utility function to update the SVIF spec and mask in the parser params. */
349 static int32_t
350 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
351                         uint32_t ifindex,
352                         uint16_t mask)
353 {
354         uint16_t svif;
355         enum bnxt_ulp_direction_type dir;
356         struct ulp_rte_hdr_field *hdr_field;
357         enum bnxt_ulp_svif_type svif_type;
358         enum bnxt_ulp_intf_type port_type;
359
360         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
361             BNXT_ULP_INVALID_SVIF_VAL) {
362                 BNXT_TF_DBG(ERR,
363                             "SVIF already set, multiple sources not supported\n");
364                 return BNXT_TF_RC_ERROR;
365         }
366
367         /* Get port type details */
368         port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
369         if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
370                 BNXT_TF_DBG(ERR, "Invalid port type\n");
371                 return BNXT_TF_RC_ERROR;
372         }
373
374         /* Update the match port type */
375         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
376
377         /* compute the direction */
378         bnxt_ulp_rte_parser_direction_compute(params);
379
380         /* Get the computed direction */
381         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
382         if (dir == BNXT_ULP_DIR_INGRESS) {
383                 svif_type = BNXT_ULP_PHY_PORT_SVIF;
384         } else {
385                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
386                         svif_type = BNXT_ULP_VF_FUNC_SVIF;
387                 else
388                         svif_type = BNXT_ULP_DRV_FUNC_SVIF;
389         }
390         ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
391                              &svif);
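        /* Store the SVIF spec in network byte order along with the mask */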
392         svif = rte_cpu_to_be_16(svif);
393         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
394         memcpy(hdr_field->spec, &svif, sizeof(svif));
395         memcpy(hdr_field->mask, &mask, sizeof(mask));
396         hdr_field->size = sizeof(svif);
397         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
398                             rte_be_to_cpu_16(svif));
399         return BNXT_TF_RC_SUCCESS;
400 }
401
402 /* Function to handle the implicit match port based on the RTE port id */
403 int32_t
404 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
405 {
406         uint16_t port_id = 0;
407         uint16_t svif_mask = 0xFFFF;
408         uint32_t ifindex;
409         int32_t rc = BNXT_TF_RC_ERROR;
410
411         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
412             BNXT_ULP_INVALID_SVIF_VAL)
413                 return BNXT_TF_RC_SUCCESS;
414
415         /* SVIF not set. So get the port id */
416         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
417
418         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
419                                               port_id,
420                                               &ifindex)) {
421                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
422                 return rc;
423         }
424
425         /* Update the SVIF details */
426         rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
427         return rc;
428 }
429
430 /* Function to handle the implicit action port id */
431 int32_t
432 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
433 {
434         struct rte_flow_action action_item = {0};
435         struct rte_flow_action_port_id port_id = {0};
436
437         /* Read the action port set bit */
438         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
439                 /* Already set, so just exit */
440                 return BNXT_TF_RC_SUCCESS;
441         }
442         port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
443         action_item.conf = &port_id;
444
445         /* Update the action port based on incoming port */
446         ulp_rte_port_id_act_handler(&action_item, params);
447
448         /* Reset the action port set bit */
449         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
450         return BNXT_TF_RC_SUCCESS;
451 }
452
453 /* Function to handle the parsing of RTE Flow item PF Header. */
454 int32_t
455 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
456                        struct ulp_rte_parser_params *params)
457 {
458         uint16_t port_id = 0;
459         uint16_t svif_mask = 0xFFFF;
460         uint32_t ifindex;
461
462         /* Get the implicit port id */
463         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
464
465         /* perform the conversion from dpdk port to bnxt ifindex */
466         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
467                                               port_id,
468                                               &ifindex)) {
469                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
470                 return BNXT_TF_RC_ERROR;
471         }
472
473         /* Update the SVIF details */
474         return  ulp_rte_parser_svif_set(params, ifindex, svif_mask);
475 }
476
477 /* Function to handle the parsing of RTE Flow item VF Header. */
478 int32_t
479 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
480                        struct ulp_rte_parser_params *params)
481 {
482         const struct rte_flow_item_vf *vf_spec = item->spec;
483         const struct rte_flow_item_vf *vf_mask = item->mask;
484         uint16_t mask = 0;
485         uint32_t ifindex;
486         int32_t rc = BNXT_TF_RC_PARSE_ERR;
487
488         /* Get VF rte_flow_item for Port details */
489         if (!vf_spec) {
490                 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
491                 return rc;
492         }
493         if (!vf_mask) {
494                 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
495                 return rc;
496         }
497         mask = vf_mask->id;
498
499         /* perform the conversion from VF Func id to bnxt ifindex */
500         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
501                                                  vf_spec->id,
502                                                  &ifindex)) {
503                 BNXT_TF_DBG(ERR, "ParseErr:VF func id is not valid\n");
504                 return rc;
505         }
506         /* Update the SVIF details */
507         return ulp_rte_parser_svif_set(params, ifindex, mask);
508 }
509
510 /* Function to handle the parsing of RTE Flow item port id Header. */
511 int32_t
512 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
513                             struct ulp_rte_parser_params *params)
514 {
515         const struct rte_flow_item_port_id *port_spec = item->spec;
516         const struct rte_flow_item_port_id *port_mask = item->mask;
517         uint16_t mask = 0;
518         int32_t rc = BNXT_TF_RC_PARSE_ERR;
519         uint32_t ifindex;
520
521         if (!port_spec) {
522                 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
523                 return rc;
524         }
525         if (!port_mask) {
526                 BNXT_TF_DBG(ERR, "ParseErr:Port id mask is not valid\n");
527                 return rc;
528         }
529         mask = port_mask->id;
530
531         /* perform the conversion from dpdk port to bnxt ifindex */
532         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
533                                               port_spec->id,
534                                               &ifindex)) {
535                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
536                 return rc;
537         }
538         /* Update the SVIF details */
539         return ulp_rte_parser_svif_set(params, ifindex, mask);
540 }
541
542 /* Function to handle the parsing of RTE Flow item phy port Header. */
543 int32_t
544 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
545                              struct ulp_rte_parser_params *params)
546 {
547         const struct rte_flow_item_phy_port *port_spec = item->spec;
548         const struct rte_flow_item_phy_port *port_mask = item->mask;
549         uint16_t mask = 0;
550         int32_t rc = BNXT_TF_RC_ERROR;
551         uint16_t svif;
552         enum bnxt_ulp_direction_type dir;
553         struct ulp_rte_hdr_field *hdr_field;
554
555         /* Copy the rte_flow_item for phy port into hdr_field */
556         if (!port_spec) {
557                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
558                 return rc;
559         }
560         if (!port_mask) {
561                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
562                 return rc;
563         }
564         mask = port_mask->index;
565
566         /* Update the match port type */
567         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
568                             BNXT_ULP_INTF_TYPE_PHY_PORT);
569
570         /* Compute the Hw direction */
571         bnxt_ulp_rte_parser_direction_compute(params);
572
573         /* Direction validation */
574         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
575         if (dir == BNXT_ULP_DIR_EGRESS) {
576                 BNXT_TF_DBG(ERR,
577                             "Parse Err:Phy ports are valid only for ingress\n");
578                 return BNXT_TF_RC_PARSE_ERR;
579         }
580
581         /* Get the physical port details from port db */
582         rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
583                                            &svif);
584         if (rc) {
585                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
586                 return BNXT_TF_RC_PARSE_ERR;
587         }
588
589         /* Update the SVIF details */
590         svif = rte_cpu_to_be_16(svif);
591         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
592         memcpy(hdr_field->spec, &svif, sizeof(svif));
593         memcpy(hdr_field->mask, &mask, sizeof(mask));
594         hdr_field->size = sizeof(svif);
595         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
596                             rte_be_to_cpu_16(svif));
597         return BNXT_TF_RC_SUCCESS;
598 }
599
600 /* Function to handle the update of proto header based on field values */
601 static void
602 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
603                              uint16_t type, uint32_t in_flag)
604 {
605         if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
606                 if (in_flag) {
607                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
608                                        BNXT_ULP_HDR_BIT_I_IPV4);
609                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
610                 } else {
611                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
612                                        BNXT_ULP_HDR_BIT_O_IPV4);
613                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
614                 }
615         } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
616                 if (in_flag) {
617                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
618                                        BNXT_ULP_HDR_BIT_I_IPV6);
619                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
620                 } else {
621                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
622                                        BNXT_ULP_HDR_BIT_O_IPV6);
623                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
624                 }
625         }
626 }
627
628 /* Internal Function to identify broadcast or multicast packets */
629 static int32_t
630 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
631 {
632         if (rte_is_multicast_ether_addr(eth_addr) ||
633             rte_is_broadcast_ether_addr(eth_addr)) {
634                 BNXT_TF_DBG(DEBUG,
635                             "No support for bcast or mcast addr offload\n");
636                 return 1;
637         }
638         return 0;
639 }
640
641 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
642 int32_t
643 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
644                         struct ulp_rte_parser_params *params)
645 {
646         const struct rte_flow_item_eth *eth_spec = item->spec;
647         const struct rte_flow_item_eth *eth_mask = item->mask;
648         struct ulp_rte_hdr_field *field;
649         uint32_t idx = params->field_idx;
650         uint32_t size;
651         uint16_t eth_type = 0;
652         uint32_t inner_flag = 0;
653
654         /*
655          * Copy the rte_flow_item for eth into hdr_field using ethernet
656          * header fields
657          */
658         if (eth_spec) {
659                 size = sizeof(eth_spec->dst.addr_bytes);
660                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
661                                                 eth_spec->dst.addr_bytes,
662                                                 size);
663                 /* Todo: workaround to reject multicast and broadcast addr */
664                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
665                         return BNXT_TF_RC_PARSE_ERR;
666
667                 size = sizeof(eth_spec->src.addr_bytes);
668                 field = ulp_rte_parser_fld_copy(field,
669                                                 eth_spec->src.addr_bytes,
670                                                 size);
671                 /* Todo: workaround to reject multicast and broadcast addr */
672                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
673                         return BNXT_TF_RC_PARSE_ERR;
674
675                 field = ulp_rte_parser_fld_copy(field,
676                                                 &eth_spec->type,
677                                                 sizeof(eth_spec->type));
678                 eth_type = eth_spec->type;
679         }
680         if (eth_mask) {
681                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
682                                        sizeof(eth_mask->dst.addr_bytes));
683                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
684                                        sizeof(eth_mask->src.addr_bytes));
685                 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
686                                        sizeof(eth_mask->type));
687         }
688         /* Add the number of Ethernet and vlan header elements */
689         params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
690         params->vlan_idx = params->field_idx;
691         params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
692
693         /* Update the protocol hdr bitmap */
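        /* An outer L2/L3/L4 header already parsed means this eth is inner */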
694         if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
695                              BNXT_ULP_HDR_BIT_O_ETH) ||
696             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697                              BNXT_ULP_HDR_BIT_O_IPV4) ||
698             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699                              BNXT_ULP_HDR_BIT_O_IPV6) ||
700             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
701                              BNXT_ULP_HDR_BIT_O_UDP) ||
702             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
703                              BNXT_ULP_HDR_BIT_O_TCP)) {
704                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
705                 inner_flag = 1;
706         } else {
707                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
708         }
709         /* Update the field protocol hdr bitmap */
710         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
711
712         return BNXT_TF_RC_SUCCESS;
713 }
714
715 /* Function to handle the parsing of RTE Flow item Vlan Header. */
716 int32_t
717 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
718                          struct ulp_rte_parser_params *params)
719 {
720         const struct rte_flow_item_vlan *vlan_spec = item->spec;
721         const struct rte_flow_item_vlan *vlan_mask = item->mask;
722         struct ulp_rte_hdr_field *field;
723         struct ulp_rte_hdr_bitmap       *hdr_bit;
724         uint32_t idx = params->vlan_idx;
725         uint16_t vlan_tag, priority;
726         uint32_t outer_vtag_num;
727         uint32_t inner_vtag_num;
728         uint16_t eth_type = 0;
729         uint32_t inner_flag = 0;
730
731         /*
732          * Copy the rte_flow_item for vlan into hdr_field using Vlan
733          * header fields
734          */
735         if (vlan_spec) {
736                 vlan_tag = ntohs(vlan_spec->tci);
737                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
738                 vlan_tag &= ULP_VLAN_TAG_MASK;
739                 vlan_tag = htons(vlan_tag);
740
741                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
742                                                 &priority,
743                                                 sizeof(priority));
744                 field = ulp_rte_parser_fld_copy(field,
745                                                 &vlan_tag,
746                                                 sizeof(vlan_tag));
747                 field = ulp_rte_parser_fld_copy(field,
748                                                 &vlan_spec->inner_type,
749                                                 sizeof(vlan_spec->inner_type));
750                 eth_type = vlan_spec->inner_type;
751         }
752
753         if (vlan_mask) {
754                 vlan_tag = ntohs(vlan_mask->tci);
755                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
756                 vlan_tag &= 0xfff;
757
758                 /*
759                  * The priority and vlan tag are stored in 2 bytes.
760                  * If the 3-bit priority mask is all 1's, then set the
761                  * remaining 13 bits to 1's as well so that the field
762                  * is treated as an exact match.
763                  */
764                 if (priority == ULP_VLAN_PRIORITY_MASK)
765                         priority |= ~ULP_VLAN_PRIORITY_MASK;
766                 if (vlan_tag == ULP_VLAN_TAG_MASK)
767                         vlan_tag |= ~ULP_VLAN_TAG_MASK;
768                 vlan_tag = htons(vlan_tag);
769
770                 /*
771                  * The priority field is ignored since OVS sets it as a
772                  * wildcard match, which is not supported. This is a
773                  * workaround and shall be addressed in the future.
774                  */
775                 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
776                                          sizeof(priority));
777
778                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
779                                        sizeof(vlan_tag));
780                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
781                                        sizeof(vlan_mask->inner_type));
782         }
783         /* Set the vlan index to new incremented value */
784         params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
785
786         /* Get the outer tag and inner tag counts */
787         outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
788                                              BNXT_ULP_CF_IDX_O_VTAG_NUM);
789         inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
790                                              BNXT_ULP_CF_IDX_I_VTAG_NUM);
791
792         /* Update the hdr_bitmap of the vlans */
793         hdr_bit = &params->hdr_bitmap;
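        /* Classify the tag as OO, OI, IO or II VLAN from the tag counts */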
794         if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
795             !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
796             !outer_vtag_num) {
797                 /* Update the vlan tag num */
798                 outer_vtag_num++;
799                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
800                                     outer_vtag_num);
801                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
802                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
803                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
804                                BNXT_ULP_HDR_BIT_OO_VLAN);
805         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
806                    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
807                    outer_vtag_num == 1) {
808                 /* update the vlan tag num */
809                 outer_vtag_num++;
810                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
811                                     outer_vtag_num);
812                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
813                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
814                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
815                                BNXT_ULP_HDR_BIT_OI_VLAN);
816         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
817                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
818                    !inner_vtag_num) {
819                 /* update the vlan tag num */
820                 inner_vtag_num++;
821                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
822                                     inner_vtag_num);
823                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
824                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
825                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
826                                BNXT_ULP_HDR_BIT_IO_VLAN);
827                 inner_flag = 1;
828         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
829                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
830                    inner_vtag_num == 1) {
831                 /* update the vlan tag num */
832                 inner_vtag_num++;
833                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
834                                     inner_vtag_num);
835                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
836                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
837                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
838                                BNXT_ULP_HDR_BIT_II_VLAN);
839                 inner_flag = 1;
840         } else {
841                 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
842                 return BNXT_TF_RC_ERROR;
843         }
844         /* Update the field protocol hdr bitmap */
845         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
846         return BNXT_TF_RC_SUCCESS;
847 }
848
849 /* Function to handle the update of proto header based on field values */
850 static void
851 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
852                              uint8_t proto, uint32_t in_flag)
853 {
854         if (proto == IPPROTO_UDP) {
855                 if (in_flag) {
856                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
857                                        BNXT_ULP_HDR_BIT_I_UDP);
858                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
859                 } else {
860                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
861                                        BNXT_ULP_HDR_BIT_O_UDP);
862                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
863                 }
864         } else if (proto == IPPROTO_TCP) {
865                 if (in_flag) {
866                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
867                                        BNXT_ULP_HDR_BIT_I_TCP);
868                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
869                 } else {
870                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
871                                        BNXT_ULP_HDR_BIT_O_TCP);
872                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
873                 }
874         }
875 }
876
877 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
878 int32_t
879 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
880                          struct ulp_rte_parser_params *params)
881 {
882         const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
883         const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
884         struct ulp_rte_hdr_field *field;
885         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
886         uint32_t idx = params->field_idx;
887         uint32_t size;
888         uint8_t proto = 0;
889         uint32_t inner_flag = 0;
890         uint32_t cnt;
891
892         /* validate that there is no 3rd L3 header */
893         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
894         if (cnt == 2) {
895                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
896                 return BNXT_TF_RC_ERROR;
897         }
898
899         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
900                               BNXT_ULP_HDR_BIT_O_ETH) &&
901             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
902                               BNXT_ULP_HDR_BIT_I_ETH)) {
903                 /* Since F2 flow does not include eth item, when parser detects
904                  * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
905                  * o_ipv4/o_ipv6, check if O_ETH and I_ETH are set. If not set,
906                  * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
907                  * This will allow the parser post processor to update the
908                  * t_dmac in hdr_field[o_eth.dmac]
909                  */
910                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
911                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
912                 params->field_idx = idx;
913         }
914
915         /*
916          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
917          * header fields
918          */
919         if (ipv4_spec) {
920                 size = sizeof(ipv4_spec->hdr.version_ihl);
921                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
922                                                 &ipv4_spec->hdr.version_ihl,
923                                                 size);
924                 size = sizeof(ipv4_spec->hdr.type_of_service);
925                 field = ulp_rte_parser_fld_copy(field,
926                                                 &ipv4_spec->hdr.type_of_service,
927                                                 size);
928                 size = sizeof(ipv4_spec->hdr.total_length);
929                 field = ulp_rte_parser_fld_copy(field,
930                                                 &ipv4_spec->hdr.total_length,
931                                                 size);
932                 size = sizeof(ipv4_spec->hdr.packet_id);
933                 field = ulp_rte_parser_fld_copy(field,
934                                                 &ipv4_spec->hdr.packet_id,
935                                                 size);
936                 size = sizeof(ipv4_spec->hdr.fragment_offset);
937                 field = ulp_rte_parser_fld_copy(field,
938                                                 &ipv4_spec->hdr.fragment_offset,
939                                                 size);
940                 size = sizeof(ipv4_spec->hdr.time_to_live);
941                 field = ulp_rte_parser_fld_copy(field,
942                                                 &ipv4_spec->hdr.time_to_live,
943                                                 size);
944                 size = sizeof(ipv4_spec->hdr.next_proto_id);
945                 field = ulp_rte_parser_fld_copy(field,
946                                                 &ipv4_spec->hdr.next_proto_id,
947                                                 size);
948                 proto = ipv4_spec->hdr.next_proto_id;
949                 size = sizeof(ipv4_spec->hdr.hdr_checksum);
950                 field = ulp_rte_parser_fld_copy(field,
951                                                 &ipv4_spec->hdr.hdr_checksum,
952                                                 size);
953                 size = sizeof(ipv4_spec->hdr.src_addr);
954                 field = ulp_rte_parser_fld_copy(field,
955                                                 &ipv4_spec->hdr.src_addr,
956                                                 size);
957                 size = sizeof(ipv4_spec->hdr.dst_addr);
958                 field = ulp_rte_parser_fld_copy(field,
959                                                 &ipv4_spec->hdr.dst_addr,
960                                                 size);
961         }
962         if (ipv4_mask) {
963                 ulp_rte_prsr_mask_copy(params, &idx,
964                                        &ipv4_mask->hdr.version_ihl,
965                                        sizeof(ipv4_mask->hdr.version_ihl));
966                 /*
967                  * The tos field is ignored since OVS is setting it as wild card
968                  * match and it is not supported. This is a workaround and
969                  * shall be addressed in the future.
970                  */
971                 ulp_rte_prsr_mask_ignore(params, &idx,
972                                          &ipv4_mask->hdr.type_of_service,
973                                          sizeof(ipv4_mask->hdr.type_of_service)
974                                          );
975
976                 ulp_rte_prsr_mask_copy(params, &idx,
977                                        &ipv4_mask->hdr.total_length,
978                                        sizeof(ipv4_mask->hdr.total_length));
979                 ulp_rte_prsr_mask_copy(params, &idx,
980                                        &ipv4_mask->hdr.packet_id,
981                                        sizeof(ipv4_mask->hdr.packet_id));
982                 ulp_rte_prsr_mask_copy(params, &idx,
983                                        &ipv4_mask->hdr.fragment_offset,
984                                        sizeof(ipv4_mask->hdr.fragment_offset));
985                 ulp_rte_prsr_mask_copy(params, &idx,
986                                        &ipv4_mask->hdr.time_to_live,
987                                        sizeof(ipv4_mask->hdr.time_to_live));
988                 ulp_rte_prsr_mask_copy(params, &idx,
989                                        &ipv4_mask->hdr.next_proto_id,
990                                        sizeof(ipv4_mask->hdr.next_proto_id));
991                 ulp_rte_prsr_mask_copy(params, &idx,
992                                        &ipv4_mask->hdr.hdr_checksum,
993                                        sizeof(ipv4_mask->hdr.hdr_checksum));
994                 ulp_rte_prsr_mask_copy(params, &idx,
995                                        &ipv4_mask->hdr.src_addr,
996                                        sizeof(ipv4_mask->hdr.src_addr));
997                 ulp_rte_prsr_mask_copy(params, &idx,
998                                        &ipv4_mask->hdr.dst_addr,
999                                        sizeof(ipv4_mask->hdr.dst_addr));
1000         }
1001         /* Add the number of ipv4 header elements */
1002         params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
1003
1004         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1005         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1006             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1007                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1008                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1009                 inner_flag = 1;
1010         } else {
1011                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1012                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1013         }
1014
1015         /* Some of the PMD applications may set the protocol field
1016          * in the IPv4 spec but don't set the mask. So, consider
1017          * the mask in the proto value calculation.
1018          */
1019         if (ipv4_mask)
1020                 proto &= ipv4_mask->hdr.next_proto_id;
1021
1022         /* Update the field protocol hdr bitmap */
1023         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1024         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1025         return BNXT_TF_RC_SUCCESS;
1026 }
1027
1028 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1029 int32_t
1030 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1031                          struct ulp_rte_parser_params *params)
1032 {
1033         const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1034         const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1035         struct ulp_rte_hdr_field *field;
1036         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1037         uint32_t idx = params->field_idx;
1038         uint32_t size;
1039         uint32_t vtcf, vtcf_mask;
1040         uint8_t proto = 0;
1041         uint32_t inner_flag = 0;
1042         uint32_t cnt;
1043
1044         /* validate that there is no 3rd L3 header */
1045         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1046         if (cnt == 2) {
1047                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1048                 return BNXT_TF_RC_ERROR;
1049         }
1050
1051         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1052                               BNXT_ULP_HDR_BIT_O_ETH) &&
1053             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1054                               BNXT_ULP_HDR_BIT_I_ETH)) {
1055                 /* Since F2 flow does not include eth item, when parser detects
1056                  * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
1057                  * o_ipv4/o_ipv6, check if O_ETH and I_ETH are set. If not set,
1058                  * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
1059                  * This will allow the parser post processor to update the
1060                  * t_dmac in hdr_field[o_eth.dmac]
1061                  */
1062                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1063                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
1064                 params->field_idx = idx;
1065         }
1066
1067         /*
1068          * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1069          * header fields
1070          */
1071         if (ipv6_spec) {
1072                 size = sizeof(ipv6_spec->hdr.vtc_flow);
1073
1074                 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1075                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1076                                                 &vtcf,
1077                                                 size);
1078
1079                 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1080                 field = ulp_rte_parser_fld_copy(field,
1081                                                 &vtcf,
1082                                                 size);
1083
1084                 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1085                 field = ulp_rte_parser_fld_copy(field,
1086                                                 &vtcf,
1087                                                 size);
1088
1089                 size = sizeof(ipv6_spec->hdr.payload_len);
1090                 field = ulp_rte_parser_fld_copy(field,
1091                                                 &ipv6_spec->hdr.payload_len,
1092                                                 size);
1093                 size = sizeof(ipv6_spec->hdr.proto);
1094                 field = ulp_rte_parser_fld_copy(field,
1095                                                 &ipv6_spec->hdr.proto,
1096                                                 size);
1097                 proto = ipv6_spec->hdr.proto;
1098                 size = sizeof(ipv6_spec->hdr.hop_limits);
1099                 field = ulp_rte_parser_fld_copy(field,
1100                                                 &ipv6_spec->hdr.hop_limits,
1101                                                 size);
1102                 size = sizeof(ipv6_spec->hdr.src_addr);
1103                 field = ulp_rte_parser_fld_copy(field,
1104                                                 &ipv6_spec->hdr.src_addr,
1105                                                 size);
1106                 size = sizeof(ipv6_spec->hdr.dst_addr);
1107                 field = ulp_rte_parser_fld_copy(field,
1108                                                 &ipv6_spec->hdr.dst_addr,
1109                                                 size);
1110         }
1111         if (ipv6_mask) {
1112                 size = sizeof(ipv6_mask->hdr.vtc_flow);
1113
1114                 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1115                 ulp_rte_prsr_mask_copy(params, &idx,
1116                                        &vtcf_mask,
1117                                        size);
1118                 /*
1119                  * The TC and flow label fields are ignored since OVS sets
1120                  * them as wildcard matches and that is not supported.
1121                  * This is a workaround and shall be addressed
1122                  * in the future.
1123                  */
1124                 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1125                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1126                 vtcf_mask =
1127                         BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1128                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1129
1130                 ulp_rte_prsr_mask_copy(params, &idx,
1131                                        &ipv6_mask->hdr.payload_len,
1132                                        sizeof(ipv6_mask->hdr.payload_len));
1133                 ulp_rte_prsr_mask_copy(params, &idx,
1134                                        &ipv6_mask->hdr.proto,
1135                                        sizeof(ipv6_mask->hdr.proto));
1136                 ulp_rte_prsr_mask_copy(params, &idx,
1137                                        &ipv6_mask->hdr.hop_limits,
1138                                        sizeof(ipv6_mask->hdr.hop_limits));
1139                 ulp_rte_prsr_mask_copy(params, &idx,
1140                                        &ipv6_mask->hdr.src_addr,
1141                                        sizeof(ipv6_mask->hdr.src_addr));
1142                 ulp_rte_prsr_mask_copy(params, &idx,
1143                                        &ipv6_mask->hdr.dst_addr,
1144                                        sizeof(ipv6_mask->hdr.dst_addr));
1145         }
1146         /* add number of ipv6 header elements */
1147         params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1148
1149         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1150         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1151             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1152                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1153                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1154                 inner_flag = 1;
1155         } else {
1156                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1157                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1158         }
1159
1160         /* Some of the PMD applications may set the protocol field
1161          * in the IPv6 spec but don't set the mask. So, consider
1162          * the mask in proto value calculation.
1163          */
1164         if (ipv6_mask)
1165                 proto &= ipv6_mask->hdr.proto;
1166
1167         /* Update the field protocol hdr bitmap */
1168         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1169         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1170
1171         return BNXT_TF_RC_SUCCESS;
1172 }
1173
1174 /* Function to handle the update of proto header based on field values */
1175 static void
1176 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1177                              uint16_t dst_port)
1178 {
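        /* A UDP destination port of 4789 identifies a VXLAN tunnel */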
1179         if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1180                 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1181                                BNXT_ULP_HDR_BIT_T_VXLAN);
1182                 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1183         }
1184 }
1185
1186 /* Function to handle the parsing of RTE Flow item UDP Header. */
1187 int32_t
1188 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1189                         struct ulp_rte_parser_params *params)
1190 {
1191         const struct rte_flow_item_udp *udp_spec = item->spec;
1192         const struct rte_flow_item_udp *udp_mask = item->mask;
1193         struct ulp_rte_hdr_field *field;
1194         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1195         uint32_t idx = params->field_idx;
1196         uint32_t size;
1197         uint16_t dst_port = 0;
1198         uint32_t cnt;
1199
1200         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1201         if (cnt == 2) {
1202                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1203                 return BNXT_TF_RC_ERROR;
1204         }
1205
1206         /*
1207          * Copy the rte_flow_item for udp into hdr_field using udp
1208          * header fields
1209          */
1210         if (udp_spec) {
1211                 size = sizeof(udp_spec->hdr.src_port);
1212                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1213                                                 &udp_spec->hdr.src_port,
1214                                                 size);
1215
1216                 size = sizeof(udp_spec->hdr.dst_port);
1217                 field = ulp_rte_parser_fld_copy(field,
1218                                                 &udp_spec->hdr.dst_port,
1219                                                 size);
1220                 dst_port = udp_spec->hdr.dst_port;
1221                 size = sizeof(udp_spec->hdr.dgram_len);
1222                 field = ulp_rte_parser_fld_copy(field,
1223                                                 &udp_spec->hdr.dgram_len,
1224                                                 size);
1225                 size = sizeof(udp_spec->hdr.dgram_cksum);
1226                 field = ulp_rte_parser_fld_copy(field,
1227                                                 &udp_spec->hdr.dgram_cksum,
1228                                                 size);
1229         }
1230         if (udp_mask) {
1231                 ulp_rte_prsr_mask_copy(params, &idx,
1232                                        &udp_mask->hdr.src_port,
1233                                        sizeof(udp_mask->hdr.src_port));
1234                 ulp_rte_prsr_mask_copy(params, &idx,
1235                                        &udp_mask->hdr.dst_port,
1236                                        sizeof(udp_mask->hdr.dst_port));
1237                 ulp_rte_prsr_mask_copy(params, &idx,
1238                                        &udp_mask->hdr.dgram_len,
1239                                        sizeof(udp_mask->hdr.dgram_len));
1240                 ulp_rte_prsr_mask_copy(params, &idx,
1241                                        &udp_mask->hdr.dgram_cksum,
1242                                        sizeof(udp_mask->hdr.dgram_cksum));
1243         }
1244
1245         /* Add number of UDP header elements */
1246         params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1247
1248         /* Set the udp header bitmap and computed l4 header bitmaps */
1249         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1250             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1251                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1252                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1253         } else {
1254                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1255                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1256                 /* Update the field protocol hdr bitmap */
1257                 ulp_rte_l4_proto_type_update(params, dst_port);
1258         }
1259         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1260         return BNXT_TF_RC_SUCCESS;
1261 }
1262
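/*
 * Illustrative only: a minimal spec/mask pair as an application might hand
 * to the UDP handler above (field names follow the public rte_flow API,
 * values are examples, not requirements):
 *
 *   struct rte_flow_item_udp spec = {
 *           .hdr = { .dst_port = RTE_BE16(4789) },
 *   };
 *   struct rte_flow_item_udp mask = {
 *           .hdr = { .dst_port = RTE_BE16(0xffff) },
 *   };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_UDP,
 *           .spec = &spec,
 *           .mask = &mask,
 *   };
 *
 * The spec bytes are copied into hdr_field[] and the mask bytes feed the
 * field bitmap through ulp_rte_prsr_mask_copy().
 */
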
1263 /* Function to handle the parsing of RTE Flow item TCP Header. */
1264 int32_t
1265 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1266                         struct ulp_rte_parser_params *params)
1267 {
1268         const struct rte_flow_item_tcp *tcp_spec = item->spec;
1269         const struct rte_flow_item_tcp *tcp_mask = item->mask;
1270         struct ulp_rte_hdr_field *field;
1271         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1272         uint32_t idx = params->field_idx;
1273         uint32_t size;
1274         uint32_t cnt;
1275
1276         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1277         if (cnt == 2) {
1278                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1279                 return BNXT_TF_RC_ERROR;
1280         }
1281
1282         /*
1283          * Copy the rte_flow_item for tcp into hdr_field using tcp
1284          * header fields
1285          */
1286         if (tcp_spec) {
1287                 size = sizeof(tcp_spec->hdr.src_port);
1288                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1289                                                 &tcp_spec->hdr.src_port,
1290                                                 size);
1291                 size = sizeof(tcp_spec->hdr.dst_port);
1292                 field = ulp_rte_parser_fld_copy(field,
1293                                                 &tcp_spec->hdr.dst_port,
1294                                                 size);
1295                 size = sizeof(tcp_spec->hdr.sent_seq);
1296                 field = ulp_rte_parser_fld_copy(field,
1297                                                 &tcp_spec->hdr.sent_seq,
1298                                                 size);
1299                 size = sizeof(tcp_spec->hdr.recv_ack);
1300                 field = ulp_rte_parser_fld_copy(field,
1301                                                 &tcp_spec->hdr.recv_ack,
1302                                                 size);
1303                 size = sizeof(tcp_spec->hdr.data_off);
1304                 field = ulp_rte_parser_fld_copy(field,
1305                                                 &tcp_spec->hdr.data_off,
1306                                                 size);
1307                 size = sizeof(tcp_spec->hdr.tcp_flags);
1308                 field = ulp_rte_parser_fld_copy(field,
1309                                                 &tcp_spec->hdr.tcp_flags,
1310                                                 size);
1311                 size = sizeof(tcp_spec->hdr.rx_win);
1312                 field = ulp_rte_parser_fld_copy(field,
1313                                                 &tcp_spec->hdr.rx_win,
1314                                                 size);
1315                 size = sizeof(tcp_spec->hdr.cksum);
1316                 field = ulp_rte_parser_fld_copy(field,
1317                                                 &tcp_spec->hdr.cksum,
1318                                                 size);
1319                 size = sizeof(tcp_spec->hdr.tcp_urp);
1320                 field = ulp_rte_parser_fld_copy(field,
1321                                                 &tcp_spec->hdr.tcp_urp,
1322                                                 size);
1323         } else {
1324                 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1325         }
1326
1327         if (tcp_mask) {
1328                 ulp_rte_prsr_mask_copy(params, &idx,
1329                                        &tcp_mask->hdr.src_port,
1330                                        sizeof(tcp_mask->hdr.src_port));
1331                 ulp_rte_prsr_mask_copy(params, &idx,
1332                                        &tcp_mask->hdr.dst_port,
1333                                        sizeof(tcp_mask->hdr.dst_port));
1334                 ulp_rte_prsr_mask_copy(params, &idx,
1335                                        &tcp_mask->hdr.sent_seq,
1336                                        sizeof(tcp_mask->hdr.sent_seq));
1337                 ulp_rte_prsr_mask_copy(params, &idx,
1338                                        &tcp_mask->hdr.recv_ack,
1339                                        sizeof(tcp_mask->hdr.recv_ack));
1340                 ulp_rte_prsr_mask_copy(params, &idx,
1341                                        &tcp_mask->hdr.data_off,
1342                                        sizeof(tcp_mask->hdr.data_off));
1343                 ulp_rte_prsr_mask_copy(params, &idx,
1344                                        &tcp_mask->hdr.tcp_flags,
1345                                        sizeof(tcp_mask->hdr.tcp_flags));
1346                 ulp_rte_prsr_mask_copy(params, &idx,
1347                                        &tcp_mask->hdr.rx_win,
1348                                        sizeof(tcp_mask->hdr.rx_win));
1349                 ulp_rte_prsr_mask_copy(params, &idx,
1350                                        &tcp_mask->hdr.cksum,
1351                                        sizeof(tcp_mask->hdr.cksum));
1352                 ulp_rte_prsr_mask_copy(params, &idx,
1353                                        &tcp_mask->hdr.tcp_urp,
1354                                        sizeof(tcp_mask->hdr.tcp_urp));
1355         }
1356         /* add number of TCP header elements */
1357         params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1358
1359         /* Set the tcp header bitmap and computed l4 header bitmaps */
1360         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1361             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1362                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1363                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1364         } else {
1365                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1366                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1367         }
1368         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1369         return BNXT_TF_RC_SUCCESS;
1370 }
1371
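/*
 * Note: like the UDP handler, the TCP handler above supports at most two
 * L4 headers (one outer, one inner); a TCP item is classified as inner
 * only when an outer UDP or TCP header has already been parsed.
 */
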
1372 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1373 int32_t
1374 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1375                           struct ulp_rte_parser_params *params)
1376 {
1377         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1378         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1379         struct ulp_rte_hdr_field *field;
1380         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1381         uint32_t idx = params->field_idx;
1382         uint32_t size;
1383
1384         /*
1385          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1386          * header fields
1387          */
1388         if (vxlan_spec) {
1389                 size = sizeof(vxlan_spec->flags);
1390                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1391                                                 &vxlan_spec->flags,
1392                                                 size);
1393                 size = sizeof(vxlan_spec->rsvd0);
1394                 field = ulp_rte_parser_fld_copy(field,
1395                                                 &vxlan_spec->rsvd0,
1396                                                 size);
1397                 size = sizeof(vxlan_spec->vni);
1398                 field = ulp_rte_parser_fld_copy(field,
1399                                                 &vxlan_spec->vni,
1400                                                 size);
1401                 size = sizeof(vxlan_spec->rsvd1);
1402                 field = ulp_rte_parser_fld_copy(field,
1403                                                 &vxlan_spec->rsvd1,
1404                                                 size);
1405         }
1406         if (vxlan_mask) {
1407                 ulp_rte_prsr_mask_copy(params, &idx,
1408                                        &vxlan_mask->flags,
1409                                        sizeof(vxlan_mask->flags));
1410                 ulp_rte_prsr_mask_copy(params, &idx,
1411                                        &vxlan_mask->rsvd0,
1412                                        sizeof(vxlan_mask->rsvd0));
1413                 ulp_rte_prsr_mask_copy(params, &idx,
1414                                        &vxlan_mask->vni,
1415                                        sizeof(vxlan_mask->vni));
1416                 ulp_rte_prsr_mask_copy(params, &idx,
1417                                        &vxlan_mask->rsvd1,
1418                                        sizeof(vxlan_mask->rsvd1));
1419         }
1420         /* Add number of vxlan header elements */
1421         params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1422
1423         /* Update the hdr_bitmap with vxlan */
1424         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1425         return BNXT_TF_RC_SUCCESS;
1426 }
1427
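/*
 * Note: in struct rte_flow_item_vxlan the VNI is a 3-byte, network-order
 * field and is copied verbatim above. Illustrative only (testpmd syntax):
 * "pattern eth / ipv4 / udp / vxlan vni is 100 / end" matches tunnelled
 * traffic carrying VNI 100.
 */
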
1428 /* Function to handle the parsing of RTE Flow item void Header */
1429 int32_t
1430 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1431                          struct ulp_rte_parser_params *params __rte_unused)
1432 {
1433         return BNXT_TF_RC_SUCCESS;
1434 }
1435
1436 /* Function to handle the parsing of RTE Flow action void Header. */
1437 int32_t
1438 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1439                          struct ulp_rte_parser_params *params __rte_unused)
1440 {
1441         return BNXT_TF_RC_SUCCESS;
1442 }
1443
1444 /* Function to handle the parsing of RTE Flow action Mark Header. */
1445 int32_t
1446 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1447                          struct ulp_rte_parser_params *param)
1448 {
1449         const struct rte_flow_action_mark *mark;
1450         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1451         uint32_t mark_id;
1452
1453         mark = action_item->conf;
1454         if (mark) {
1455                 mark_id = tfp_cpu_to_be_32(mark->id);
1456                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1457                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1458
1459                 /* Update the act_bitmap with mark */
1460                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1461                 return BNXT_TF_RC_SUCCESS;
1462         }
1463         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1464         return BNXT_TF_RC_ERROR;
1465 }
1466
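/*
 * Note: the mark id is stored big-endian in the action property buffer
 * above. With standard MARK semantics the id is typically reported back
 * in the received mbuf (for example in mbuf->hash.fdir.hi when the RX
 * FDIR-ID flag is set). Illustrative only (testpmd syntax):
 * "actions mark id 42 / queue index 0 / end".
 */
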
1467 /* Function to handle the parsing of RTE Flow action RSS Header. */
1468 int32_t
1469 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1470                         struct ulp_rte_parser_params *param)
1471 {
1472         const struct rte_flow_action_rss *rss = action_item->conf;
1473
1474         if (rss) {
1475                 /* Update the act_bitmap with rss */
1476                 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1477                 return BNXT_TF_RC_SUCCESS;
1478         }
1479         BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1480         return BNXT_TF_RC_ERROR;
1481 }
1482
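/*
 * The vxlan_encap handler below walks the action's "definition" item chain
 * and expects, in order: ETH, optionally one or two VLAN items, IPV4 or
 * IPV6, UDP and VXLAN. A minimal sketch of how an application might build
 * the action (illustrative only; eth, ip, udp and vxlan are
 * application-provided item specs):
 *
 *   struct rte_flow_item defn[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ip },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_vxlan_encap conf = { .definition = defn };
 *   struct rte_flow_action act = {
 *           .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 *           .conf = &conf,
 *   };
 */
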
1483 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1484 int32_t
1485 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1486                                 struct ulp_rte_parser_params *params)
1487 {
1488         const struct rte_flow_action_vxlan_encap *vxlan_encap;
1489         const struct rte_flow_item *item;
1490         const struct rte_flow_item_eth *eth_spec;
1491         const struct rte_flow_item_ipv4 *ipv4_spec;
1492         const struct rte_flow_item_ipv6 *ipv6_spec;
1493         struct rte_flow_item_vxlan vxlan_spec;
1494         uint32_t vlan_num = 0, vlan_size = 0;
1495         uint32_t ip_size = 0, ip_type = 0;
1496         uint32_t vxlan_size = 0;
1497         uint8_t *buff;
1498         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1499         const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1500                                     0x00, 0x40, 0x11};
1501         /* IPv6 header per byte - vtc_flow(4), plen(2), proto, hop_limit */
1502         const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1503                                 0x00, 0x11, 0xf6};
1504         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1505         struct ulp_rte_act_prop *ap = &params->act_prop;
1506         const uint8_t *tmp_buff;
1507
1508         vxlan_encap = action_item->conf;
1509         if (!vxlan_encap) {
1510                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1511                 return BNXT_TF_RC_ERROR;
1512         }
1513
1514         item = vxlan_encap->definition;
1515         if (!item) {
1516                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1517                 return BNXT_TF_RC_ERROR;
1518         }
1519
1520         if (!ulp_rte_item_skip_void(&item, 0))
1521                 return BNXT_TF_RC_ERROR;
1522
1523         /* must have ethernet header */
1524         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1525                 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1526                 return BNXT_TF_RC_ERROR;
1527         }
1528         eth_spec = item->spec;
1529         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1530         ulp_encap_buffer_copy(buff,
1531                               eth_spec->dst.addr_bytes,
1532                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1533                               ULP_BUFFER_ALIGN_8_BYTE);
1534
1535         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1536         ulp_encap_buffer_copy(buff,
1537                               eth_spec->src.addr_bytes,
1538                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1539                               ULP_BUFFER_ALIGN_8_BYTE);
1540
1541         /* Goto the next item */
1542         if (!ulp_rte_item_skip_void(&item, 1))
1543                 return BNXT_TF_RC_ERROR;
1544
1545         /* May have vlan header */
1546         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1547                 vlan_num++;
1548                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1549                 ulp_encap_buffer_copy(buff,
1550                                       item->spec,
1551                                       sizeof(struct rte_flow_item_vlan),
1552                                       ULP_BUFFER_ALIGN_8_BYTE);
1553
1554                 if (!ulp_rte_item_skip_void(&item, 1))
1555                         return BNXT_TF_RC_ERROR;
1556         }
1557
1558         /* May have a second vlan header */
1559         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1560                 vlan_num++;
1561                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1562                        sizeof(struct rte_flow_item_vlan)],
1563                        item->spec,
1564                        sizeof(struct rte_flow_item_vlan));
1565                 if (!ulp_rte_item_skip_void(&item, 1))
1566                         return BNXT_TF_RC_ERROR;
1567         }
1568         /* Update the vlan count and size if vlan tags are present */
1569         if (vlan_num) {
1570                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1571                 vlan_num = tfp_cpu_to_be_32(vlan_num);
1572                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1573                        &vlan_num,
1574                        sizeof(uint32_t));
1575                 vlan_size = tfp_cpu_to_be_32(vlan_size);
1576                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1577                        &vlan_size,
1578                        sizeof(uint32_t));
1579         }
1580
1581         /* L3 must be IPv4 or IPv6 */
1582         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1583                 ipv4_spec = item->spec;
1584                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1585
1586                 /* copy the ipv4 details */
1587                 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1588                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1589                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1590                         ulp_encap_buffer_copy(buff,
1591                                               def_ipv4_hdr,
1592                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1593                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1594                                               ULP_BUFFER_ALIGN_8_BYTE);
1595                 } else {
1596                         /* The total length field is ignored in the ip hdr. */
1597                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1598                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1599                         ulp_encap_buffer_copy(buff,
1600                                               tmp_buff,
1601                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1602                                               ULP_BUFFER_ALIGN_8_BYTE);
1603                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1604                              BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1605                         ulp_encap_buffer_copy(buff,
1606                                               &ipv4_spec->hdr.version_ihl,
1607                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1608                                               ULP_BUFFER_ALIGN_8_BYTE);
1609                 }
1610
1611                 /* Update the dst ip address in ip encap buffer */
1612                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1613                     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1614                     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1615                 ulp_encap_buffer_copy(buff,
1616                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1617                                       sizeof(ipv4_spec->hdr.dst_addr),
1618                                       ULP_BUFFER_ALIGN_8_BYTE);
1619
1620                 /* Update the src ip address */
1621                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1622                         BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1623                         sizeof(ipv4_spec->hdr.src_addr)];
1624                 ulp_encap_buffer_copy(buff,
1625                                       (const uint8_t *)&ipv4_spec->hdr.src_addr,
1626                                       sizeof(ipv4_spec->hdr.src_addr),
1627                                       ULP_BUFFER_ALIGN_8_BYTE);
1628
1629                 /* Update the ip size details */
1630                 ip_size = tfp_cpu_to_be_32(ip_size);
1631                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1632                        &ip_size, sizeof(uint32_t));
1633
1634                 /* update the ip type */
1635                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1636                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1637                        &ip_type, sizeof(uint32_t));
1638
1639                 /* update the computed field to notify it is ipv4 header */
1640                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1641                                     1);
1642
1643                 if (!ulp_rte_item_skip_void(&item, 1))
1644                         return BNXT_TF_RC_ERROR;
1645         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1646                 ipv6_spec = item->spec;
1647                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1648
1649                 /* copy the ipv6 details */
1650                 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1651                 if (ulp_buffer_is_empty(tmp_buff,
1652                                         BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1653                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1654                         ulp_encap_buffer_copy(buff,
1655                                               def_ipv6_hdr,
1656                                               sizeof(def_ipv6_hdr),
1657                                               ULP_BUFFER_ALIGN_8_BYTE);
1658                 } else {
1659                         /* The payload length field is ignored in the ip hdr. */
1660                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1661                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1662                         ulp_encap_buffer_copy(buff,
1663                                               tmp_buff,
1664                                               BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1665                                               ULP_BUFFER_ALIGN_8_BYTE);
1666                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1667                                 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1668                                 BNXT_ULP_ENCAP_IPV6_DO];
1669                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1670                         ulp_encap_buffer_copy(buff,
1671                                               tmp_buff,
1672                                               BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1673                                               ULP_BUFFER_ALIGN_8_BYTE);
1674                 }
1675                 /* Update the dst ip address in ip encap buffer */
1676                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1677                         sizeof(def_ipv6_hdr)];
1678                 ulp_encap_buffer_copy(buff,
1679                                       (const uint8_t *)ipv6_spec->hdr.dst_addr,
1680                                       sizeof(ipv6_spec->hdr.dst_addr),
1681                                       ULP_BUFFER_ALIGN_8_BYTE);
1682
1683                 /* Update the src ip address */
1684                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1685                 ulp_encap_buffer_copy(buff,
1686                                       (const uint8_t *)ipv6_spec->hdr.src_addr,
1687                                       sizeof(ipv6_spec->hdr.src_addr),
1688                                       ULP_BUFFER_ALIGN_16_BYTE);
1689
1690                 /* Update the ip size details */
1691                 ip_size = tfp_cpu_to_be_32(ip_size);
1692                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1693                        &ip_size, sizeof(uint32_t));
1694
1695                  /* update the ip type */
1696                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1697                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1698                        &ip_type, sizeof(uint32_t));
1699
1700                 /* update the computed field to notify it is ipv6 header */
1701                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1702                                     1);
1703
1704                 if (!ulp_rte_item_skip_void(&item, 1))
1705                         return BNXT_TF_RC_ERROR;
1706         } else {
1707                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1708                 return BNXT_TF_RC_ERROR;
1709         }
1710
1711         /* L4 is UDP */
1712         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1713                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1714                 return BNXT_TF_RC_ERROR;
1715         }
1716         /* copy the udp details */
1717         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1718                               item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1719                               ULP_BUFFER_ALIGN_8_BYTE);
1720
1721         if (!ulp_rte_item_skip_void(&item, 1))
1722                 return BNXT_TF_RC_ERROR;
1723
1724         /* Finally VXLAN */
1725         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1726                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1727                 return BNXT_TF_RC_ERROR;
1728         }
1729         vxlan_size = sizeof(struct rte_flow_item_vxlan);
1730         /* copy the vxlan details */
1731         memcpy(&vxlan_spec, item->spec, vxlan_size);
1732         vxlan_spec.flags = 0x08;
1733         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
1734         if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1735                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1736                                       vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1737         } else {
1738                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1739                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1740                 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1741                                       (const uint8_t *)&vxlan_spec.vni,
1742                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1743         }
1744         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1745         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1746                &vxlan_size, sizeof(uint32_t));
1747
1748         /* Update the act_bitmap with vxlan encap */
1749         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1750         return BNXT_TF_RC_SUCCESS;
1751 }
1752
1753 /* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
1754 int32_t
1755 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1756                                 __rte_unused,
1757                                 struct ulp_rte_parser_params *params)
1758 {
1759         /* Update the act_bitmap with vxlan decap */
1760         ULP_BITMAP_SET(params->act_bitmap.bits,
1761                        BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1762         /* Update computational field with tunnel decap info */
1763         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1764         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1765         return BNXT_TF_RC_SUCCESS;
1766 }
1767
1768 /* Function to handle the parsing of RTE Flow action drop Header. */
1769 int32_t
1770 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1771                          struct ulp_rte_parser_params *params)
1772 {
1773         /* Update the act_bitmap with drop */
1774         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1775         return BNXT_TF_RC_SUCCESS;
1776 }
1777
1778 /* Function to handle the parsing of RTE Flow action count. */
1779 int32_t
1780 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1781                           struct ulp_rte_parser_params *params)
1782
1783 {
1784         const struct rte_flow_action_count *act_count;
1785         struct ulp_rte_act_prop *act_prop = &params->act_prop;
1786
1787         act_count = action_item->conf;
1788         if (act_count) {
1789                 if (act_count->shared) {
1790                         BNXT_TF_DBG(ERR,
1791                                     "Parse Error:Shared count not supported\n");
1792                         return BNXT_TF_RC_PARSE_ERR;
1793                 }
1794                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1795                        &act_count->id,
1796                        BNXT_ULP_ACT_PROP_SZ_COUNT);
1797         }
1798
1799         /* Update the act_bitmap with count */
1800         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1801         return BNXT_TF_RC_SUCCESS;
1802 }
1803
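/*
 * Note: shared counters are rejected above; only per-flow counters are
 * parsed. Illustrative only (testpmd syntax): create with
 * "actions count / drop / end" and read the counter back with
 * "flow query <port> <rule> count".
 */
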
1804 /* Function to handle the parsing of action ports. */
1805 static int32_t
1806 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1807                             uint32_t ifindex)
1808 {
1809         enum bnxt_ulp_direction_type dir;
1810         uint16_t pid_s;
1811         uint32_t pid;
1812         struct ulp_rte_act_prop *act = &param->act_prop;
1813         enum bnxt_ulp_intf_type port_type;
1814         uint32_t vnic_type;
1815
1816         /* Get the direction */
1817         dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1818         if (dir == BNXT_ULP_DIR_EGRESS) {
1819                 /* For egress direction, fill vport */
1820                 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1821                         return BNXT_TF_RC_ERROR;
1822
1823                 pid = pid_s;
1824                 pid = rte_cpu_to_be_32(pid);
1825                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1826                        &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1827         } else {
1828                 /* For ingress direction, fill vnic */
1829                 port_type = ULP_COMP_FLD_IDX_RD(param,
1830                                                 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1831                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1832                         vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1833                 else
1834                         vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1835
1836                 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1837                                                  vnic_type, &pid_s))
1838                         return BNXT_TF_RC_ERROR;
1839
1840                 pid = pid_s;
1841                 pid = rte_cpu_to_be_32(pid);
1842                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1843                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1844         }
1845
1846         /* Update the action port set bit */
1847         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1848         return BNXT_TF_RC_SUCCESS;
1849 }
1850
1851 /* Function to handle the parsing of RTE Flow action PF. */
1852 int32_t
1853 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1854                        struct ulp_rte_parser_params *params)
1855 {
1856         uint32_t port_id;
1857         uint32_t ifindex;
1858         enum bnxt_ulp_intf_type intf_type;
1859
1860         /* Get the port id of the current device */
1861         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1862
1863         /* Get the port db ifindex */
1864         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1865                                               &ifindex)) {
1866                 BNXT_TF_DBG(ERR, "Invalid port id\n");
1867                 return BNXT_TF_RC_ERROR;
1868         }
1869
1870         /* Check that the port is a PF port */
1871         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1872         if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1873                 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1874                 return BNXT_TF_RC_ERROR;
1875         }
1876         /* Update the action properties */
1877         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1878         return ulp_rte_parser_act_port_set(params, ifindex);
1879 }
1880
1881 /* Function to handle the parsing of RTE Flow action VF. */
1882 int32_t
1883 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1884                        struct ulp_rte_parser_params *params)
1885 {
1886         const struct rte_flow_action_vf *vf_action;
1887         uint32_t ifindex;
1888         enum bnxt_ulp_intf_type intf_type;
1889
1890         vf_action = action_item->conf;
1891         if (!vf_action) {
1892                 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1893                 return BNXT_TF_RC_PARSE_ERR;
1894         }
1895
1896         if (vf_action->original) {
1897                 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1898                 return BNXT_TF_RC_PARSE_ERR;
1899         }
1900
1901         /* Check that the port is a VF port */
1902         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1903                                                  &ifindex)) {
1904                 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1905                 return BNXT_TF_RC_ERROR;
1906         }
1907         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1908         if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1909             intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1910                 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1911                 return BNXT_TF_RC_ERROR;
1912         }
1913
1914         /* Update the action properties */
1915         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1916         return ulp_rte_parser_act_port_set(params, ifindex);
1917 }
1918
1919 /* Function to handle the parsing of RTE Flow action port_id. */
1920 int32_t
1921 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1922                             struct ulp_rte_parser_params *param)
1923 {
1924         const struct rte_flow_action_port_id *port_id = act_item->conf;
1925         uint32_t ifindex;
1926         enum bnxt_ulp_intf_type intf_type;
1927
1928         if (!port_id) {
1929                 BNXT_TF_DBG(ERR,
1930                             "ParseErr: Invalid Argument\n");
1931                 return BNXT_TF_RC_PARSE_ERR;
1932         }
1933         if (port_id->original) {
1934                 BNXT_TF_DBG(ERR,
1935                             "ParseErr:Portid Original not supported\n");
1936                 return BNXT_TF_RC_PARSE_ERR;
1937         }
1938
1939         /* Get the port db ifindex */
1940         if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1941                                               &ifindex)) {
1942                 BNXT_TF_DBG(ERR, "Invalid port id\n");
1943                 return BNXT_TF_RC_ERROR;
1944         }
1945
1946         /* Get the intf type */
1947         intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1948         if (!intf_type) {
1949                 BNXT_TF_DBG(ERR, "Invalid port type\n");
1950                 return BNXT_TF_RC_ERROR;
1951         }
1952
1953         /* Set the action port */
1954         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1955         return ulp_rte_parser_act_port_set(param, ifindex);
1956 }
1957
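/*
 * Note: the port_id handler above resolves the DPDK port to its port
 * database ifindex; ulp_rte_parser_act_port_set() then fills in either the
 * vport (egress) or the VNIC (ingress). Illustrative only (testpmd
 * syntax): "flow create 0 ingress transfer pattern eth / end
 * actions port_id id 1 / end".
 */
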
1958 /* Function to handle the parsing of RTE Flow action phy_port. */
1959 int32_t
1960 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1961                              struct ulp_rte_parser_params *prm)
1962 {
1963         const struct rte_flow_action_phy_port *phy_port;
1964         uint32_t pid;
1965         int32_t rc;
1966         uint16_t pid_s;
1967         enum bnxt_ulp_direction_type dir;
1968
1969         phy_port = action_item->conf;
1970         if (!phy_port) {
1971                 BNXT_TF_DBG(ERR,
1972                             "ParseErr: Invalid Argument\n");
1973                 return BNXT_TF_RC_PARSE_ERR;
1974         }
1975
1976         if (phy_port->original) {
1977                 BNXT_TF_DBG(ERR,
1978                             "Parse Err:Port Original not supported\n");
1979                 return BNXT_TF_RC_PARSE_ERR;
1980         }
1981         dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1982         if (dir != BNXT_ULP_DIR_EGRESS) {
1983                 BNXT_TF_DBG(ERR,
1984                             "Parse Err:Phy ports are valid only for egress\n");
1985                 return BNXT_TF_RC_PARSE_ERR;
1986         }
1987         /* Get the physical port details from port db */
1988         rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1989                                             &pid_s);
1990         if (rc) {
1991                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1992                 return -EINVAL;
1993         }
1994
1995         pid = pid_s;
1996         pid = rte_cpu_to_be_32(pid);
1997         memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1998                &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1999
2000         /* Update the action port set bit */
2001         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2002         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2003                             BNXT_ULP_INTF_TYPE_PHY_PORT);
2004         return BNXT_TF_RC_SUCCESS;
2005 }
2006
2007 /* Function to handle the parsing of RTE Flow action pop vlan. */
2008 int32_t
2009 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2010                                 struct ulp_rte_parser_params *params)
2011 {
2012         /* Update the act_bitmap with pop */
2013         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
2014         return BNXT_TF_RC_SUCCESS;
2015 }
2016
2017 /* Function to handle the parsing of RTE Flow action push vlan. */
2018 int32_t
2019 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2020                                  struct ulp_rte_parser_params *params)
2021 {
2022         const struct rte_flow_action_of_push_vlan *push_vlan;
2023         uint16_t ethertype;
2024         struct ulp_rte_act_prop *act = &params->act_prop;
2025
2026         push_vlan = action_item->conf;
2027         if (push_vlan) {
2028                 ethertype = push_vlan->ethertype;
2029                 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2030                         BNXT_TF_DBG(ERR,
2031                                     "Parse Err: Ethertype not supported\n");
2032                         return BNXT_TF_RC_PARSE_ERR;
2033                 }
2034                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2035                        &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2036                 /* Update the act_bitmap with push vlan */
2037                 ULP_BITMAP_SET(params->act_bitmap.bits,
2038                                BNXT_ULP_ACTION_BIT_PUSH_VLAN);
2039                 return BNXT_TF_RC_SUCCESS;
2040         }
2041         BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2042         return BNXT_TF_RC_ERROR;
2043 }
2044
2045 /* Function to handle the parsing of RTE Flow action set vlan id. */
2046 int32_t
2047 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2048                                     struct ulp_rte_parser_params *params)
2049 {
2050         const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2051         uint32_t vid;
2052         struct ulp_rte_act_prop *act = &params->act_prop;
2053
2054         vlan_vid = action_item->conf;
2055         if (vlan_vid && vlan_vid->vlan_vid) {
2056                 vid = vlan_vid->vlan_vid;
2057                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2058                        &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2059                 /* Update the act_bitmap with set vlan vid */
2060                 ULP_BITMAP_SET(params->act_bitmap.bits,
2061                                BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
2062                 return BNXT_TF_RC_SUCCESS;
2063         }
2064         BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2065         return BNXT_TF_RC_ERROR;
2066 }
2067
2068 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2069 int32_t
2070 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2071                                     struct ulp_rte_parser_params *params)
2072 {
2073         const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2074         uint8_t pcp;
2075         struct ulp_rte_act_prop *act = &params->act_prop;
2076
2077         vlan_pcp = action_item->conf;
2078         if (vlan_pcp) {
2079                 pcp = vlan_pcp->vlan_pcp;
2080                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2081                        &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2082                 /* Update the act_bitmap with set vlan pcp */
2083                 ULP_BITMAP_SET(params->act_bitmap.bits,
2084                                BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
2085                 return BNXT_TF_RC_SUCCESS;
2086         }
2087         BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2088         return BNXT_TF_RC_ERROR;
2089 }
2090
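/*
 * The three OF_* VLAN handlers above are typically combined in one rule.
 * Illustrative only (testpmd syntax):
 *
 *   actions of_push_vlan ethertype 0x8100 /
 *           of_set_vlan_vid vlan_vid 5 /
 *           of_set_vlan_pcp vlan_pcp 3 / port_id id 1 / end
 */
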
2091 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2092 int32_t
2093 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2094                                  struct ulp_rte_parser_params *params)
2095 {
2096         const struct rte_flow_action_set_ipv4 *set_ipv4;
2097         struct ulp_rte_act_prop *act = &params->act_prop;
2098
2099         set_ipv4 = action_item->conf;
2100         if (set_ipv4) {
2101                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2102                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2103                 /* Update the act_bitmap with set ipv4 src */
2104                 ULP_BITMAP_SET(params->act_bitmap.bits,
2105                                BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
2106                 return BNXT_TF_RC_SUCCESS;
2107         }
2108         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2109         return BNXT_TF_RC_ERROR;
2110 }
2111
2112 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2113 int32_t
2114 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2115                                  struct ulp_rte_parser_params *params)
2116 {
2117         const struct rte_flow_action_set_ipv4 *set_ipv4;
2118         struct ulp_rte_act_prop *act = &params->act_prop;
2119
2120         set_ipv4 = action_item->conf;
2121         if (set_ipv4) {
2122                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2123                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2124                 /* Update the act_bitmap with set ipv4 dst */
2125                 ULP_BITMAP_SET(params->act_bitmap.bits,
2126                                BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
2127                 return BNXT_TF_RC_SUCCESS;
2128         }
2129         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2130         return BNXT_TF_RC_ERROR;
2131 }
2132
2133 /* Function to handle the parsing of RTE Flow action set tp src.*/
2134 int32_t
2135 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2136                                struct ulp_rte_parser_params *params)
2137 {
2138         const struct rte_flow_action_set_tp *set_tp;
2139         struct ulp_rte_act_prop *act = &params->act_prop;
2140
2141         set_tp = action_item->conf;
2142         if (set_tp) {
2143                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2144                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2145                 /* Update the act_bitmap with set tp src */
2146                 ULP_BITMAP_SET(params->act_bitmap.bits,
2147                                BNXT_ULP_ACTION_BIT_SET_TP_SRC);
2148                 return BNXT_TF_RC_SUCCESS;
2149         }
2150
2151         BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2152         return BNXT_TF_RC_ERROR;
2153 }
2154
2155 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2156 int32_t
2157 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2158                                struct ulp_rte_parser_params *params)
2159 {
2160         const struct rte_flow_action_set_tp *set_tp;
2161         struct ulp_rte_act_prop *act = &params->act_prop;
2162
2163         set_tp = action_item->conf;
2164         if (set_tp) {
2165                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2166                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2167                 /* Update the act_bitmap with set tp dst */
2168                 ULP_BITMAP_SET(params->act_bitmap.bits,
2169                                BNXT_ULP_ACTION_BIT_SET_TP_DST);
2170                 return BNXT_TF_RC_SUCCESS;
2171         }
2172
2173         BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
2174         return BNXT_TF_RC_ERROR;
2175 }
2176
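/*
 * The set_ipv4_* and set_tp_* handlers above only record the new address
 * and port values in the action property buffer and set the matching
 * action bits; the later template/mapper stage programs the actual header
 * rewrite. Illustrative only (testpmd syntax):
 *
 *   actions set_ipv4_src ipv4_addr 10.0.0.1 / set_tp_src port 4000 /
 *           port_id id 1 / end
 */
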
2177 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2178 int32_t
2179 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2180                             struct ulp_rte_parser_params *params)
2181 {
2182         /* Update the act_bitmap with dec ttl */
2183         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2184         return BNXT_TF_RC_SUCCESS;
2185 }
2186
2187 /* Function to handle the parsing of RTE Flow action JUMP */
2188 int32_t
2189 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2190                             struct ulp_rte_parser_params *params)
2191 {
2192         /* Update the act_bitmap with jump */
2193         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
2194         return BNXT_TF_RC_SUCCESS;
2195 }