net/bnxt: add a failure log
[dpdk.git] drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "ulp_rte_parser.h"
12 #include "ulp_matcher.h"
13 #include "ulp_utils.h"
14 #include "tfp.h"
15 #include "ulp_port_db.h"
16 #include "ulp_flow_db.h"
17 #include "ulp_mapper.h"
18 #include "ulp_tun.h"
19
20 /* Local defines for the parsing functions */
21 #define ULP_VLAN_PRIORITY_SHIFT         13 /* First 3 bits */
22 #define ULP_VLAN_PRIORITY_MASK          0x700
23 #define ULP_VLAN_TAG_MASK               0xFFF /* Last 12 bits */
24 #define ULP_UDP_PORT_VXLAN              4789
25
26 /* Utility function to skip the void items. */
27 static inline int32_t
28 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
29 {
30         if (!*item)
31                 return 0;
32         if (increment)
33                 (*item)++;
34         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
35                 (*item)++;
36         if (*item)
37                 return 1;
38         return 0;
39 }
40
41 /* Utility function to update the field_bitmap */
42 static void
43 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
44                                    uint32_t idx)
45 {
46         struct ulp_rte_hdr_field *field;
47
48         field = &params->hdr_field[idx];
49         if (ulp_bitmap_notzero(field->mask, field->size)) {
50                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
51                 /* Not exact match */
52                 if (!ulp_bitmap_is_ones(field->mask, field->size))
53                         ULP_BITMAP_SET(params->fld_bitmap.bits,
54                                        BNXT_ULP_MATCH_TYPE_BITMASK_WM);
55         } else {
56                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
57         }
58 }
59
60 /* Utility function to copy field spec items */
61 static struct ulp_rte_hdr_field *
62 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
63                         const void *buffer,
64                         uint32_t size)
65 {
66         field->size = size;
67         memcpy(field->spec, buffer, field->size);
68         field++;
69         return field;
70 }
71
72 /* Utility function to copy field mask items */
73 static void
74 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
75                        uint32_t *idx,
76                        const void *buffer,
77                        uint32_t size)
78 {
79         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
80
81         memcpy(field->mask, buffer, size);
82         ulp_rte_parser_field_bitmap_update(params, *idx);
83         *idx = *idx + 1;
84 }
85
86 /* Utility function to ignore field mask items */
87 static void
88 ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
89                          uint32_t *idx,
90                          const void *buffer __rte_unused,
91                          uint32_t size __rte_unused)
92 {
93         *idx = *idx + 1;
94 }
95
96 /*
97  * Function to handle the parsing of RTE Flows and placing
98  * the RTE flow items into the ulp structures.
99  */
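/*
 * For example, for a pattern such as ETH / IPV4 / UDP / END the parse loop
 * looks up each item type in ulp_hdr_info[] and, assuming that table maps
 * these item types to the handlers defined later in this file, dispatches
 * ulp_rte_eth_hdr_handler(), ulp_rte_ipv4_hdr_handler() and
 * ulp_rte_udp_hdr_handler() in turn before updating the implicit SVIF.
 */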
100 int32_t
101 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
102                               struct ulp_rte_parser_params *params)
103 {
104         const struct rte_flow_item *item = pattern;
105         struct bnxt_ulp_rte_hdr_info *hdr_info;
106
107         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
108
109         /* Set the computed flags for no vlan tags before parsing */
110         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
111         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
112
113         /* Parse all the items in the pattern */
114         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
115                 /* get the header information from the ulp_hdr_info table */
116                 hdr_info = &ulp_hdr_info[item->type];
117                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
118                         BNXT_TF_DBG(ERR,
119                                     "Truflow parser does not support type %d\n",
120                                     item->type);
121                         return BNXT_TF_RC_PARSE_ERR;
122                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
123                         /* call the registered callback handler */
124                         if (hdr_info->proto_hdr_func) {
125                                 if (hdr_info->proto_hdr_func(item, params) !=
126                                     BNXT_TF_RC_SUCCESS) {
127                                         return BNXT_TF_RC_ERROR;
128                                 }
129                         }
130                 }
131                 item++;
132         }
133         /* update the implied SVIF */
134         return ulp_rte_parser_implicit_match_port_process(params);
135 }
136
137 /*
138  * Function to handle the parsing of RTE Flows and placing
139  * the RTE flow actions into the ulp structures.
140  */
141 int32_t
142 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
143                               struct ulp_rte_parser_params *params)
144 {
145         const struct rte_flow_action *action_item = actions;
146         struct bnxt_ulp_rte_act_info *hdr_info;
147
148         /* Parse all the actions in the action list */
149         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
150                 /* get the action information from the ulp_act_info table */
151                 hdr_info = &ulp_act_info[action_item->type];
152                 if (hdr_info->act_type ==
153                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
154                         BNXT_TF_DBG(ERR,
155                                     "Truflow parser does not support act %u\n",
156                                     action_item->type);
157                         return BNXT_TF_RC_ERROR;
158                 } else if (hdr_info->act_type ==
159                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
160                         /* call the registered callback handler */
161                         if (hdr_info->proto_act_func) {
162                                 if (hdr_info->proto_act_func(action_item,
163                                                              params) !=
164                                     BNXT_TF_RC_SUCCESS) {
165                                         return BNXT_TF_RC_ERROR;
166                                 }
167                         }
168                 }
169                 action_item++;
170         }
171         /* update the implied port details */
172         ulp_rte_parser_implicit_act_port_process(params);
173         return BNXT_TF_RC_SUCCESS;
174 }
175
176 /*
177  * Function to handle the post processing of the computed
178  * fields for the interface.
179  */
180 static void
181 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
182 {
183         uint32_t ifindex;
184         uint16_t port_id, parif;
185         uint32_t mtype;
186         enum bnxt_ulp_direction_type dir;
187
188         /* get the direction details */
189         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
190
191         /* read the port id details */
192         port_id = ULP_COMP_FLD_IDX_RD(params,
193                                       BNXT_ULP_CF_IDX_INCOMING_IF);
194         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
195                                               port_id,
196                                               &ifindex)) {
197                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
198                 return;
199         }
200
201         if (dir == BNXT_ULP_DIR_INGRESS) {
202                 /* Set port PARIF */
203                 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
204                                           BNXT_ULP_PHY_PORT_PARIF, &parif)) {
205                         BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
206                         return;
207                 }
208                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
209                                     parif);
210         } else {
211                 /* Get the match port type */
212                 mtype = ULP_COMP_FLD_IDX_RD(params,
213                                             BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
214                 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
215                         ULP_COMP_FLD_IDX_WR(params,
216                                             BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
217                                             1);
218                         /* Set VF func PARIF */
219                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
220                                                   BNXT_ULP_VF_FUNC_PARIF,
221                                                   &parif)) {
222                                 BNXT_TF_DBG(ERR,
223                                             "ParseErr:ifindex is not valid\n");
224                                 return;
225                         }
226                         ULP_COMP_FLD_IDX_WR(params,
227                                             BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
228                                             parif);
229
230                         /* populate the loopback parif */
231                         ULP_COMP_FLD_IDX_WR(params,
232                                             BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
233                                             BNXT_ULP_SYM_VF_FUNC_PARIF);
234
235                 } else {
236                         /* Set DRV func PARIF */
237                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
238                                                   BNXT_ULP_DRV_FUNC_PARIF,
239                                                   &parif)) {
240                                 BNXT_TF_DBG(ERR,
241                                             "ParseErr:ifindex is not valid\n");
242                                 return;
243                         }
244                         ULP_COMP_FLD_IDX_WR(params,
245                                             BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
246                                             parif);
247                 }
248         }
249 }
250
251 static int32_t
252 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
253 {
254         enum bnxt_ulp_intf_type match_port_type, act_port_type;
255         enum bnxt_ulp_direction_type dir;
256         uint32_t act_port_set;
257
258         /* Get the computed details */
259         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
260         match_port_type = ULP_COMP_FLD_IDX_RD(params,
261                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
262         act_port_type = ULP_COMP_FLD_IDX_RD(params,
263                                             BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
264         act_port_set = ULP_COMP_FLD_IDX_RD(params,
265                                            BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
266
267         /* set the flow direction in the proto and action header */
268         if (dir == BNXT_ULP_DIR_EGRESS) {
269                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
270                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
271                 ULP_BITMAP_SET(params->act_bitmap.bits,
272                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
273         }
274
275         /* calculate the VF to VF flag */
276         if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
277             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
278                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
279
280         /* Update the decrement ttl computational fields */
281         if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
282                              BNXT_ULP_ACTION_BIT_DEC_TTL)) {
283                 /*
284                  * If the vxlan protocol is included and the vxlan decap
285                  * action is not set, then decrement the tunnel ttl.
286                  * Similarly, add GRE and NVGRE in the future.
287                  */
288                 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
289                                       BNXT_ULP_HDR_BIT_T_VXLAN) &&
290                     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
291                                       BNXT_ULP_ACTION_BIT_VXLAN_DECAP))) {
292                         ULP_COMP_FLD_IDX_WR(params,
293                                             BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
294                 } else {
295                         ULP_COMP_FLD_IDX_WR(params,
296                                             BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
297                 }
298         }
299
300         /* Merge the hdr_fp_bit into the proto header bit */
301         params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
302
303         /* Update the computed interface parameters */
304         bnxt_ulp_comp_fld_intf_update(params);
305
306         /* TBD: Handle the flow rejection scenarios */
307         return 0;
308 }
309
310 /*
311  * Function to handle the post processing of the parsing details
312  */
313 int32_t
314 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
315 {
316         ulp_post_process_normal_flow(params);
317         return ulp_post_process_tun_flow(params);
318 }
319
320 /*
321  * Function to compute the flow direction based on the match port details
322  */
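/*
 * For example, a flow created with the ingress attribute whose match port
 * is a VF representor is computed as an egress flow; otherwise the
 * direction from the flow attributes is used as-is.
 */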
323 static void
324 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
325 {
326         enum bnxt_ulp_intf_type match_port_type;
327
328         /* Get the match port type */
329         match_port_type = ULP_COMP_FLD_IDX_RD(params,
330                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
331
332         /* If ingress flow and match port is vf rep, then dir is egress */
333         if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
334             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
335                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
336                                     BNXT_ULP_DIR_EGRESS);
337         } else {
338                 /* Assign the input direction */
339                 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
340                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
341                                             BNXT_ULP_DIR_INGRESS);
342                 else
343                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
344                                             BNXT_ULP_DIR_EGRESS);
345         }
346 }
347
348 /* Utility function to update the SVIF details in the parser params. */
349 static int32_t
350 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
351                         uint32_t ifindex,
352                         uint16_t mask)
353 {
354         uint16_t svif;
355         enum bnxt_ulp_direction_type dir;
356         struct ulp_rte_hdr_field *hdr_field;
357         enum bnxt_ulp_svif_type svif_type;
358         enum bnxt_ulp_intf_type port_type;
359
360         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
361             BNXT_ULP_INVALID_SVIF_VAL) {
362                 BNXT_TF_DBG(ERR,
363                             "SVIF already set, multiple sources not supported\n");
364                 return BNXT_TF_RC_ERROR;
365         }
366
367         /* Get port type details */
368         port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
369         if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
370                 BNXT_TF_DBG(ERR, "Invalid port type\n");
371                 return BNXT_TF_RC_ERROR;
372         }
373
374         /* Update the match port type */
375         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
376
377         /* compute the direction */
378         bnxt_ulp_rte_parser_direction_compute(params);
379
380         /* Get the computed direction */
381         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
382         if (dir == BNXT_ULP_DIR_INGRESS) {
383                 svif_type = BNXT_ULP_PHY_PORT_SVIF;
384         } else {
385                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
386                         svif_type = BNXT_ULP_VF_FUNC_SVIF;
387                 else
388                         svif_type = BNXT_ULP_DRV_FUNC_SVIF;
389         }
390         ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
391                              &svif);
392         svif = rte_cpu_to_be_16(svif);
393         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
394         memcpy(hdr_field->spec, &svif, sizeof(svif));
395         memcpy(hdr_field->mask, &mask, sizeof(mask));
396         hdr_field->size = sizeof(svif);
397         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
398                             rte_be_to_cpu_16(svif));
399         return BNXT_TF_RC_SUCCESS;
400 }
401
402 /* Function to handle the implicit match port id */
403 int32_t
404 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
405 {
406         uint16_t port_id = 0;
407         uint16_t svif_mask = 0xFFFF;
408         uint32_t ifindex;
409         int32_t rc = BNXT_TF_RC_ERROR;
410
411         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
412             BNXT_ULP_INVALID_SVIF_VAL)
413                 return BNXT_TF_RC_SUCCESS;
414
415         /* SVIF not set. So get the port id */
416         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
417
418         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
419                                               port_id,
420                                               &ifindex)) {
421                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
422                 return rc;
423         }
424
425         /* Update the SVIF details */
426         rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
427         return rc;
428 }
429
430 /* Function to handle the implicit action port id */
431 int32_t
432 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
433 {
434         struct rte_flow_action action_item = {0};
435         struct rte_flow_action_port_id port_id = {0};
436
437         /* Read the action port set bit */
438         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
439                 /* Already set, so just exit */
440                 return BNXT_TF_RC_SUCCESS;
441         }
442         port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
443         action_item.conf = &port_id;
444
445         /* Update the action port based on incoming port */
446         ulp_rte_port_id_act_handler(&action_item, params);
447
448         /* Reset the action port set bit */
449         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
450         return BNXT_TF_RC_SUCCESS;
451 }
452
453 /* Function to handle the parsing of RTE Flow item PF Header. */
454 int32_t
455 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
456                        struct ulp_rte_parser_params *params)
457 {
458         uint16_t port_id = 0;
459         uint16_t svif_mask = 0xFFFF;
460         uint32_t ifindex;
461
462         /* Get the implicit port id */
463         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
464
465         /* perform the conversion from dpdk port to bnxt ifindex */
466         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
467                                               port_id,
468                                               &ifindex)) {
469                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
470                 return BNXT_TF_RC_ERROR;
471         }
472
473         /* Update the SVIF details */
474         return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
475 }
476
477 /* Function to handle the parsing of RTE Flow item VF Header. */
478 int32_t
479 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
480                        struct ulp_rte_parser_params *params)
481 {
482         const struct rte_flow_item_vf *vf_spec = item->spec;
483         const struct rte_flow_item_vf *vf_mask = item->mask;
484         uint16_t mask = 0;
485         uint32_t ifindex;
486         int32_t rc = BNXT_TF_RC_PARSE_ERR;
487
488         /* Get VF rte_flow_item for Port details */
489         if (!vf_spec) {
490                 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
491                 return rc;
492         }
493         if (!vf_mask) {
494                 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
495                 return rc;
496         }
497         mask = vf_mask->id;
498
499         /* perform the conversion from VF Func id to bnxt ifindex */
500         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
501                                                  vf_spec->id,
502                                                  &ifindex)) {
503                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
504                 return rc;
505         }
506         /* Update the SVIF details */
507         return ulp_rte_parser_svif_set(params, ifindex, mask);
508 }
509
510 /* Function to handle the parsing of RTE Flow item port id Header. */
511 int32_t
512 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
513                             struct ulp_rte_parser_params *params)
514 {
515         const struct rte_flow_item_port_id *port_spec = item->spec;
516         const struct rte_flow_item_port_id *port_mask = item->mask;
517         uint16_t mask = 0;
518         int32_t rc = BNXT_TF_RC_PARSE_ERR;
519         uint32_t ifindex;
520
521         if (!port_spec) {
522                 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
523                 return rc;
524         }
525         if (!port_mask) {
526                 BNXT_TF_DBG(ERR, "ParseErr:Port id mask is not valid\n");
527                 return rc;
528         }
529         mask = port_mask->id;
530
531         /* perform the conversion from dpdk port to bnxt ifindex */
532         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
533                                               port_spec->id,
534                                               &ifindex)) {
535                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
536                 return rc;
537         }
538         /* Update the SVIF details */
539         return ulp_rte_parser_svif_set(params, ifindex, mask);
540 }
541
542 /* Function to handle the parsing of RTE Flow item phy port Header. */
543 int32_t
544 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
545                              struct ulp_rte_parser_params *params)
546 {
547         const struct rte_flow_item_phy_port *port_spec = item->spec;
548         const struct rte_flow_item_phy_port *port_mask = item->mask;
549         uint16_t mask = 0;
550         int32_t rc = BNXT_TF_RC_ERROR;
551         uint16_t svif;
552         enum bnxt_ulp_direction_type dir;
553         struct ulp_rte_hdr_field *hdr_field;
554
555         /* Copy the rte_flow_item for phy port into hdr_field */
556         if (!port_spec) {
557                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
558                 return rc;
559         }
560         if (!port_mask) {
561                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
562                 return rc;
563         }
564         mask = port_mask->index;
565
566         /* Update the match port type */
567         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
568                             BNXT_ULP_INTF_TYPE_PHY_PORT);
569
570         /* Compute the Hw direction */
571         bnxt_ulp_rte_parser_direction_compute(params);
572
573         /* Direction validation */
574         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
575         if (dir == BNXT_ULP_DIR_EGRESS) {
576                 BNXT_TF_DBG(ERR,
577                             "Parse Err:Phy ports are valid only for ingress\n");
578                 return BNXT_TF_RC_PARSE_ERR;
579         }
580
581         /* Get the physical port details from port db */
582         rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
583                                            &svif);
584         if (rc) {
585                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
586                 return BNXT_TF_RC_PARSE_ERR;
587         }
588
589         /* Update the SVIF details */
590         svif = rte_cpu_to_be_16(svif);
591         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
592         memcpy(hdr_field->spec, &svif, sizeof(svif));
593         memcpy(hdr_field->mask, &mask, sizeof(mask));
594         hdr_field->size = sizeof(svif);
595         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
596                             rte_be_to_cpu_16(svif));
597         return BNXT_TF_RC_SUCCESS;
598 }
599
600 /* Function to handle the update of proto header based on field values */
601 static void
602 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
603                              uint16_t type, uint32_t in_flag)
604 {
605         if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
606                 if (in_flag) {
607                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
608                                        BNXT_ULP_HDR_BIT_I_IPV4);
609                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
610                 } else {
611                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
612                                        BNXT_ULP_HDR_BIT_O_IPV4);
613                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
614                 }
615         } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6))  {
616                 if (in_flag) {
617                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
618                                        BNXT_ULP_HDR_BIT_I_IPV6);
619                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
620                 } else {
621                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
622                                        BNXT_ULP_HDR_BIT_O_IPV6);
623                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
624                 }
625         }
626 }
627
628 /* Internal Function to identify broadcast or multicast packets */
629 static int32_t
630 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
631 {
632         if (rte_is_multicast_ether_addr(eth_addr) ||
633             rte_is_broadcast_ether_addr(eth_addr)) {
634                 BNXT_TF_DBG(DEBUG,
635                             "No support for bcast or mcast addr offload\n");
636                 return 1;
637         }
638         return 0;
639 }
640
641 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
642 int32_t
643 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
644                         struct ulp_rte_parser_params *params)
645 {
646         const struct rte_flow_item_eth *eth_spec = item->spec;
647         const struct rte_flow_item_eth *eth_mask = item->mask;
648         struct ulp_rte_hdr_field *field;
649         uint32_t idx = params->field_idx;
650         uint32_t size;
651         uint16_t eth_type = 0;
652         uint32_t inner_flag = 0;
653
654         /*
655          * Copy the rte_flow_item for eth into hdr_field using ethernet
656          * header fields
657          */
658         if (eth_spec) {
659                 size = sizeof(eth_spec->dst.addr_bytes);
660                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
661                                                 eth_spec->dst.addr_bytes,
662                                                 size);
663                 /* Todo: workaround to avoid multicast and broadcast addr */
664                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
665                         return BNXT_TF_RC_PARSE_ERR;
666
667                 size = sizeof(eth_spec->src.addr_bytes);
668                 field = ulp_rte_parser_fld_copy(field,
669                                                 eth_spec->src.addr_bytes,
670                                                 size);
671                 /* Todo: workaround to avoid multicast and broadcast addr */
672                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
673                         return BNXT_TF_RC_PARSE_ERR;
674
675                 field = ulp_rte_parser_fld_copy(field,
676                                                 &eth_spec->type,
677                                                 sizeof(eth_spec->type));
678                 eth_type = eth_spec->type;
679         }
680         if (eth_mask) {
681                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
682                                        sizeof(eth_mask->dst.addr_bytes));
683                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
684                                        sizeof(eth_mask->src.addr_bytes));
685                 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
686                                        sizeof(eth_mask->type));
687         }
688         /* Add number of vlan header elements */
689         params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
690         params->vlan_idx = params->field_idx;
691         params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
692
693         /* Update the protocol hdr bitmap */
694         if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
695                              BNXT_ULP_HDR_BIT_O_ETH) ||
696             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
697                              BNXT_ULP_HDR_BIT_O_IPV4) ||
698             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
699                              BNXT_ULP_HDR_BIT_O_IPV6) ||
700             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
701                              BNXT_ULP_HDR_BIT_O_UDP) ||
702             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
703                              BNXT_ULP_HDR_BIT_O_TCP)) {
704                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
705                 inner_flag = 1;
706         } else {
707                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
708         }
709         /* Update the field protocol hdr bitmap */
710         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
711
712         return BNXT_TF_RC_SUCCESS;
713 }
714
715 /* Function to handle the parsing of RTE Flow item Vlan Header. */
716 int32_t
717 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
718                          struct ulp_rte_parser_params *params)
719 {
720         const struct rte_flow_item_vlan *vlan_spec = item->spec;
721         const struct rte_flow_item_vlan *vlan_mask = item->mask;
722         struct ulp_rte_hdr_field *field;
723         struct ulp_rte_hdr_bitmap       *hdr_bit;
724         uint32_t idx = params->vlan_idx;
725         uint16_t vlan_tag, priority;
726         uint32_t outer_vtag_num;
727         uint32_t inner_vtag_num;
728         uint16_t eth_type = 0;
729         uint32_t inner_flag = 0;
730
731         /*
732          * Copy the rte_flow_item for vlan into hdr_field using Vlan
733          * header fields
734          */
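        /*
         * For illustration: a TCI of 0x2005 in host byte order splits into
         * priority 1 (upper 3 bits) and VLAN tag 5 (lower 12 bits); both
         * values are stored back in network byte order below.
         */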
735         if (vlan_spec) {
736                 vlan_tag = ntohs(vlan_spec->tci);
737                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
738                 vlan_tag &= ULP_VLAN_TAG_MASK;
739                 vlan_tag = htons(vlan_tag);
740
741                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
742                                                 &priority,
743                                                 sizeof(priority));
744                 field = ulp_rte_parser_fld_copy(field,
745                                                 &vlan_tag,
746                                                 sizeof(vlan_tag));
747                 field = ulp_rte_parser_fld_copy(field,
748                                                 &vlan_spec->inner_type,
749                                                 sizeof(vlan_spec->inner_type));
750                 eth_type = vlan_spec->inner_type;
751         }
752
753         if (vlan_mask) {
754                 vlan_tag = ntohs(vlan_mask->tci);
755                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
756                 vlan_tag &= 0xfff;
757
758                 /*
759                  * The storage for the priority and vlan tag is 2 bytes.
760                  * If the 3-bit priority mask is all 1's, then set the
761                  * remaining 13 bits to 1's as well so that the field is
762                  * treated as an exact match.
763                  */
764                 if (priority == ULP_VLAN_PRIORITY_MASK)
765                         priority |= ~ULP_VLAN_PRIORITY_MASK;
766                 if (vlan_tag == ULP_VLAN_TAG_MASK)
767                         vlan_tag |= ~ULP_VLAN_TAG_MASK;
768                 vlan_tag = htons(vlan_tag);
769
770                 /*
771                  * The priority field is ignored since OVS sets it as a
772                  * wildcard match and it is not supported. This is a
773                  * workaround and shall be addressed in the future.
774                  */
775                 ulp_rte_prsr_mask_ignore(params, &idx, &priority,
776                                          sizeof(priority));
777
778                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
779                                        sizeof(vlan_tag));
780                 ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
781                                        sizeof(vlan_mask->inner_type));
782         }
783         /* Set the vlan index to new incremented value */
784         params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
785
786         /* Get the outer tag and inner tag counts */
787         outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
788                                              BNXT_ULP_CF_IDX_O_VTAG_NUM);
789         inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
790                                              BNXT_ULP_CF_IDX_I_VTAG_NUM);
791
792         /* Update the hdr_bitmap of the vlans */
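        /*
         * A flow may carry at most two outer and two inner VLAN tags: the
         * first and second outer tags set OO_VLAN and OI_VLAN, while the
         * first and second inner tags set IO_VLAN and II_VLAN respectively.
         */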
793         hdr_bit = &params->hdr_bitmap;
794         if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
795             !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
796             !outer_vtag_num) {
797                 /* Update the vlan tag num */
798                 outer_vtag_num++;
799                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
800                                     outer_vtag_num);
801                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
802                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
803                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
804                                BNXT_ULP_HDR_BIT_OO_VLAN);
805         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
806                    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
807                    outer_vtag_num == 1) {
808                 /* update the vlan tag num */
809                 outer_vtag_num++;
810                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
811                                     outer_vtag_num);
812                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
813                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
814                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
815                                BNXT_ULP_HDR_BIT_OI_VLAN);
816         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
817                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
818                    !inner_vtag_num) {
819                 /* update the vlan tag num */
820                 inner_vtag_num++;
821                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
822                                     inner_vtag_num);
823                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
824                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
825                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
826                                BNXT_ULP_HDR_BIT_IO_VLAN);
827                 inner_flag = 1;
828         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
829                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
830                    inner_vtag_num == 1) {
831                 /* update the vlan tag num */
832                 inner_vtag_num++;
833                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
834                                     inner_vtag_num);
835                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
836                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
837                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
838                                BNXT_ULP_HDR_BIT_II_VLAN);
839                 inner_flag = 1;
840         } else {
841                 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
842                 return BNXT_TF_RC_ERROR;
843         }
844         /* Update the field protocol hdr bitmap */
845         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
846         return BNXT_TF_RC_SUCCESS;
847 }
848
849 /* Function to handle the update of proto header based on field values */
850 static void
851 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
852                              uint8_t proto, uint32_t in_flag)
853 {
854         if (proto == IPPROTO_UDP) {
855                 if (in_flag) {
856                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
857                                        BNXT_ULP_HDR_BIT_I_UDP);
858                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
859                 } else {
860                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
861                                        BNXT_ULP_HDR_BIT_O_UDP);
862                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
863                 }
864         } else if (proto == IPPROTO_TCP) {
865                 if (in_flag) {
866                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
867                                        BNXT_ULP_HDR_BIT_I_TCP);
868                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
869                 } else {
870                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
871                                        BNXT_ULP_HDR_BIT_O_TCP);
872                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
873                 }
874         }
875 }
876
877 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
878 int32_t
879 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
880                          struct ulp_rte_parser_params *params)
881 {
882         const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
883         const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
884         struct ulp_rte_hdr_field *field;
885         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
886         uint32_t idx = params->field_idx;
887         uint32_t size;
888         uint8_t proto = 0;
889         uint32_t inner_flag = 0;
890         uint32_t cnt;
891
892         /* validate that there is no 3rd L3 header */
893         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
894         if (cnt == 2) {
895                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
896                 return BNXT_TF_RC_ERROR;
897         }
898
899         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
900                               BNXT_ULP_HDR_BIT_O_ETH) &&
901             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
902                               BNXT_ULP_HDR_BIT_I_ETH)) {
903                 /* Since an F2 flow does not include an eth item, when the
904                  * parser detects an IPv4/IPv6 item that belongs to the outer
905                  * header (i.e., o_ipv4/o_ipv6), check whether O_ETH and
906                  * I_ETH are set. If not, add offset sizeof(o_eth/oo_vlan/
907                  * oi_vlan) to the index. This allows the parser post
908                  * processor to update the t_dmac in hdr_field[o_eth.dmac].
909                  */
910                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
911                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
912                 params->field_idx = idx;
913         }
914
915         /*
916          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
917          * header fields
918          */
919         if (ipv4_spec) {
920                 size = sizeof(ipv4_spec->hdr.version_ihl);
921                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
922                                                 &ipv4_spec->hdr.version_ihl,
923                                                 size);
924                 size = sizeof(ipv4_spec->hdr.type_of_service);
925                 field = ulp_rte_parser_fld_copy(field,
926                                                 &ipv4_spec->hdr.type_of_service,
927                                                 size);
928                 size = sizeof(ipv4_spec->hdr.total_length);
929                 field = ulp_rte_parser_fld_copy(field,
930                                                 &ipv4_spec->hdr.total_length,
931                                                 size);
932                 size = sizeof(ipv4_spec->hdr.packet_id);
933                 field = ulp_rte_parser_fld_copy(field,
934                                                 &ipv4_spec->hdr.packet_id,
935                                                 size);
936                 size = sizeof(ipv4_spec->hdr.fragment_offset);
937                 field = ulp_rte_parser_fld_copy(field,
938                                                 &ipv4_spec->hdr.fragment_offset,
939                                                 size);
940                 size = sizeof(ipv4_spec->hdr.time_to_live);
941                 field = ulp_rte_parser_fld_copy(field,
942                                                 &ipv4_spec->hdr.time_to_live,
943                                                 size);
944                 size = sizeof(ipv4_spec->hdr.next_proto_id);
945                 field = ulp_rte_parser_fld_copy(field,
946                                                 &ipv4_spec->hdr.next_proto_id,
947                                                 size);
948                 proto = ipv4_spec->hdr.next_proto_id;
949                 size = sizeof(ipv4_spec->hdr.hdr_checksum);
950                 field = ulp_rte_parser_fld_copy(field,
951                                                 &ipv4_spec->hdr.hdr_checksum,
952                                                 size);
953                 size = sizeof(ipv4_spec->hdr.src_addr);
954                 field = ulp_rte_parser_fld_copy(field,
955                                                 &ipv4_spec->hdr.src_addr,
956                                                 size);
957                 size = sizeof(ipv4_spec->hdr.dst_addr);
958                 field = ulp_rte_parser_fld_copy(field,
959                                                 &ipv4_spec->hdr.dst_addr,
960                                                 size);
961         }
962         if (ipv4_mask) {
963                 ulp_rte_prsr_mask_copy(params, &idx,
964                                        &ipv4_mask->hdr.version_ihl,
965                                        sizeof(ipv4_mask->hdr.version_ihl));
966                 /*
967                  * The tos field is ignored since OVS sets it as a wildcard
968                  * match and it is not supported. This is a workaround and
969                  * shall be addressed in the future.
970                  */
971                 ulp_rte_prsr_mask_ignore(params, &idx,
972                                          &ipv4_mask->hdr.type_of_service,
973                                          sizeof(ipv4_mask->hdr.type_of_service)
974                                          );
975
976                 ulp_rte_prsr_mask_copy(params, &idx,
977                                        &ipv4_mask->hdr.total_length,
978                                        sizeof(ipv4_mask->hdr.total_length));
979                 ulp_rte_prsr_mask_copy(params, &idx,
980                                        &ipv4_mask->hdr.packet_id,
981                                        sizeof(ipv4_mask->hdr.packet_id));
982                 ulp_rte_prsr_mask_copy(params, &idx,
983                                        &ipv4_mask->hdr.fragment_offset,
984                                        sizeof(ipv4_mask->hdr.fragment_offset));
985                 ulp_rte_prsr_mask_copy(params, &idx,
986                                        &ipv4_mask->hdr.time_to_live,
987                                        sizeof(ipv4_mask->hdr.time_to_live));
988                 ulp_rte_prsr_mask_copy(params, &idx,
989                                        &ipv4_mask->hdr.next_proto_id,
990                                        sizeof(ipv4_mask->hdr.next_proto_id));
991                 ulp_rte_prsr_mask_copy(params, &idx,
992                                        &ipv4_mask->hdr.hdr_checksum,
993                                        sizeof(ipv4_mask->hdr.hdr_checksum));
994                 ulp_rte_prsr_mask_copy(params, &idx,
995                                        &ipv4_mask->hdr.src_addr,
996                                        sizeof(ipv4_mask->hdr.src_addr));
997                 ulp_rte_prsr_mask_copy(params, &idx,
998                                        &ipv4_mask->hdr.dst_addr,
999                                        sizeof(ipv4_mask->hdr.dst_addr));
1000         }
1001         /* Add the number of ipv4 header elements */
1002         params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
1003
1004         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1005         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1006             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1007                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1008                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1009                 inner_flag = 1;
1010         } else {
1011                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1012                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1013         }
1014
1015         /* Update the field protocol hdr bitmap */
1016         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1017         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1018         return BNXT_TF_RC_SUCCESS;
1019 }
1020
1021 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1022 int32_t
1023 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1024                          struct ulp_rte_parser_params *params)
1025 {
1026         const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1027         const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1028         struct ulp_rte_hdr_field *field;
1029         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1030         uint32_t idx = params->field_idx;
1031         uint32_t size;
1032         uint32_t vtcf, vtcf_mask;
1033         uint8_t proto = 0;
1034         uint32_t inner_flag = 0;
1035         uint32_t cnt;
1036
1037         /* validate that there is no 3rd L3 header */
1038         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1039         if (cnt == 2) {
1040                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1041                 return BNXT_TF_RC_ERROR;
1042         }
1043
1044         if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1045                               BNXT_ULP_HDR_BIT_O_ETH) &&
1046             !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
1047                               BNXT_ULP_HDR_BIT_I_ETH)) {
1048                 /* Since an F2 flow does not include an eth item, when the
1049                  * parser detects an IPv4/IPv6 item that belongs to the outer
1050                  * header (i.e., o_ipv4/o_ipv6), check whether O_ETH and
1051                  * I_ETH are set. If not, add offset sizeof(o_eth/oo_vlan/
1052                  * oi_vlan) to the index. This allows the parser post
1053                  * processor to update the t_dmac in hdr_field[o_eth.dmac].
1054                  */
1055                 idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
1056                         BNXT_ULP_PROTO_HDR_VLAN_NUM);
1057                 params->field_idx = idx;
1058         }
1059
1060         /*
1061          * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1062          * header fields
1063          */
1064         if (ipv6_spec) {
1065                 size = sizeof(ipv6_spec->hdr.vtc_flow);
1066
1067                 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1068                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1069                                                 &vtcf,
1070                                                 size);
1071
1072                 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1073                 field = ulp_rte_parser_fld_copy(field,
1074                                                 &vtcf,
1075                                                 size);
1076
1077                 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1078                 field = ulp_rte_parser_fld_copy(field,
1079                                                 &vtcf,
1080                                                 size);
1081
1082                 size = sizeof(ipv6_spec->hdr.payload_len);
1083                 field = ulp_rte_parser_fld_copy(field,
1084                                                 &ipv6_spec->hdr.payload_len,
1085                                                 size);
1086                 size = sizeof(ipv6_spec->hdr.proto);
1087                 field = ulp_rte_parser_fld_copy(field,
1088                                                 &ipv6_spec->hdr.proto,
1089                                                 size);
1090                 proto = ipv6_spec->hdr.proto;
1091                 size = sizeof(ipv6_spec->hdr.hop_limits);
1092                 field = ulp_rte_parser_fld_copy(field,
1093                                                 &ipv6_spec->hdr.hop_limits,
1094                                                 size);
1095                 size = sizeof(ipv6_spec->hdr.src_addr);
1096                 field = ulp_rte_parser_fld_copy(field,
1097                                                 &ipv6_spec->hdr.src_addr,
1098                                                 size);
1099                 size = sizeof(ipv6_spec->hdr.dst_addr);
1100                 field = ulp_rte_parser_fld_copy(field,
1101                                                 &ipv6_spec->hdr.dst_addr,
1102                                                 size);
1103         }
1104         if (ipv6_mask) {
1105                 size = sizeof(ipv6_mask->hdr.vtc_flow);
1106
1107                 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1108                 ulp_rte_prsr_mask_copy(params, &idx,
1109                                        &vtcf_mask,
1110                                        size);
1111                 /*
1112                  * The TC and flow label fields are ignored since OVS sets
1113                  * them for matching and that is not supported.
1114                  * This is a workaround and shall be addressed in the
1115                  * future.
1116                  */
1117                 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1118                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1119                 vtcf_mask =
1120                         BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1121                 ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
1122
1123                 ulp_rte_prsr_mask_copy(params, &idx,
1124                                        &ipv6_mask->hdr.payload_len,
1125                                        sizeof(ipv6_mask->hdr.payload_len));
1126                 ulp_rte_prsr_mask_copy(params, &idx,
1127                                        &ipv6_mask->hdr.proto,
1128                                        sizeof(ipv6_mask->hdr.proto));
1129                 ulp_rte_prsr_mask_copy(params, &idx,
1130                                        &ipv6_mask->hdr.hop_limits,
1131                                        sizeof(ipv6_mask->hdr.hop_limits));
1132                 ulp_rte_prsr_mask_copy(params, &idx,
1133                                        &ipv6_mask->hdr.src_addr,
1134                                        sizeof(ipv6_mask->hdr.src_addr));
1135                 ulp_rte_prsr_mask_copy(params, &idx,
1136                                        &ipv6_mask->hdr.dst_addr,
1137                                        sizeof(ipv6_mask->hdr.dst_addr));
1138         }
1139         /* add number of ipv6 header elements */
1140         params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
1141
1142         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1143         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1144             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1145                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1146                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1147                 inner_flag = 1;
1148         } else {
1149                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1150                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1151         }
1152
1153         /* Update the field protocol hdr bitmap */
1154         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1155         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1156
1157         return BNXT_TF_RC_SUCCESS;
1158 }
1159
1160 /* Function to handle the update of proto header based on field values */
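/*
 * For example, an outer UDP destination port of 4789 (the VXLAN port) sets
 * the tunnel VXLAN header bit and marks the flow as an L3 tunnel in the
 * computed fields.
 */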
1161 static void
1162 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1163                              uint16_t dst_port)
1164 {
1165         if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1166                 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1167                                BNXT_ULP_HDR_BIT_T_VXLAN);
1168                 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1169         }
1170 }
1171
1172 /* Function to handle the parsing of RTE Flow item UDP Header. */
1173 int32_t
1174 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1175                         struct ulp_rte_parser_params *params)
1176 {
1177         const struct rte_flow_item_udp *udp_spec = item->spec;
1178         const struct rte_flow_item_udp *udp_mask = item->mask;
1179         struct ulp_rte_hdr_field *field;
1180         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1181         uint32_t idx = params->field_idx;
1182         uint32_t size;
1183         uint16_t dst_port = 0;
1184         uint32_t cnt;
1185
1186         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1187         if (cnt == 2) {
1188                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1189                 return BNXT_TF_RC_ERROR;
1190         }
1191
1192         /*
1193          * Copy the rte_flow_item for udp into hdr_field using udp
1194          * header fields
1195          */
1196         if (udp_spec) {
1197                 size = sizeof(udp_spec->hdr.src_port);
1198                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1199                                                 &udp_spec->hdr.src_port,
1200                                                 size);
1201
1202                 size = sizeof(udp_spec->hdr.dst_port);
1203                 field = ulp_rte_parser_fld_copy(field,
1204                                                 &udp_spec->hdr.dst_port,
1205                                                 size);
1206                 dst_port = udp_spec->hdr.dst_port;
1207                 size = sizeof(udp_spec->hdr.dgram_len);
1208                 field = ulp_rte_parser_fld_copy(field,
1209                                                 &udp_spec->hdr.dgram_len,
1210                                                 size);
1211                 size = sizeof(udp_spec->hdr.dgram_cksum);
1212                 field = ulp_rte_parser_fld_copy(field,
1213                                                 &udp_spec->hdr.dgram_cksum,
1214                                                 size);
1215         }
1216         if (udp_mask) {
1217                 ulp_rte_prsr_mask_copy(params, &idx,
1218                                        &udp_mask->hdr.src_port,
1219                                        sizeof(udp_mask->hdr.src_port));
1220                 ulp_rte_prsr_mask_copy(params, &idx,
1221                                        &udp_mask->hdr.dst_port,
1222                                        sizeof(udp_mask->hdr.dst_port));
1223                 ulp_rte_prsr_mask_copy(params, &idx,
1224                                        &udp_mask->hdr.dgram_len,
1225                                        sizeof(udp_mask->hdr.dgram_len));
1226                 ulp_rte_prsr_mask_copy(params, &idx,
1227                                        &udp_mask->hdr.dgram_cksum,
1228                                        sizeof(udp_mask->hdr.dgram_cksum));
1229         }
1230
1231         /* Add number of UDP header elements */
1232         params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
1233
1234         /* Set the udp header bitmap and computed l4 header bitmaps */
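             /*
              * If an outer L4 header has already been parsed, this UDP
              * header is the inner one; otherwise it is the outer header and
              * is additionally checked for the VXLAN destination port (see
              * ulp_rte_l4_proto_type_update above).
              */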
1235         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1236             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1237                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1238                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1239         } else {
1240                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1241                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1242                 /* Update the field protocol hdr bitmap */
1243                 ulp_rte_l4_proto_type_update(params, dst_port);
1244         }
1245         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1246         return BNXT_TF_RC_SUCCESS;
1247 }
1248
1249 /* Function to handle the parsing of RTE Flow item TCP Header. */
1250 int32_t
1251 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1252                         struct ulp_rte_parser_params *params)
1253 {
1254         const struct rte_flow_item_tcp *tcp_spec = item->spec;
1255         const struct rte_flow_item_tcp *tcp_mask = item->mask;
1256         struct ulp_rte_hdr_field *field;
1257         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1258         uint32_t idx = params->field_idx;
1259         uint32_t size;
1260         uint32_t cnt;
1261
1262         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1263         if (cnt == 2) {
1264                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1265                 return BNXT_TF_RC_ERROR;
1266         }
1267
1268         /*
1269          * Copy the rte_flow_item for tcp into hdr_field using tcp
1270          * header fields
1271          */
1272         if (tcp_spec) {
1273                 size = sizeof(tcp_spec->hdr.src_port);
1274                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1275                                                 &tcp_spec->hdr.src_port,
1276                                                 size);
1277                 size = sizeof(tcp_spec->hdr.dst_port);
1278                 field = ulp_rte_parser_fld_copy(field,
1279                                                 &tcp_spec->hdr.dst_port,
1280                                                 size);
1281                 size = sizeof(tcp_spec->hdr.sent_seq);
1282                 field = ulp_rte_parser_fld_copy(field,
1283                                                 &tcp_spec->hdr.sent_seq,
1284                                                 size);
1285                 size = sizeof(tcp_spec->hdr.recv_ack);
1286                 field = ulp_rte_parser_fld_copy(field,
1287                                                 &tcp_spec->hdr.recv_ack,
1288                                                 size);
1289                 size = sizeof(tcp_spec->hdr.data_off);
1290                 field = ulp_rte_parser_fld_copy(field,
1291                                                 &tcp_spec->hdr.data_off,
1292                                                 size);
1293                 size = sizeof(tcp_spec->hdr.tcp_flags);
1294                 field = ulp_rte_parser_fld_copy(field,
1295                                                 &tcp_spec->hdr.tcp_flags,
1296                                                 size);
1297                 size = sizeof(tcp_spec->hdr.rx_win);
1298                 field = ulp_rte_parser_fld_copy(field,
1299                                                 &tcp_spec->hdr.rx_win,
1300                                                 size);
1301                 size = sizeof(tcp_spec->hdr.cksum);
1302                 field = ulp_rte_parser_fld_copy(field,
1303                                                 &tcp_spec->hdr.cksum,
1304                                                 size);
1305                 size = sizeof(tcp_spec->hdr.tcp_urp);
1306                 field = ulp_rte_parser_fld_copy(field,
1307                                                 &tcp_spec->hdr.tcp_urp,
1308                                                 size);
1309         } else {
1310                 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1311         }
1312
1313         if (tcp_mask) {
1314                 ulp_rte_prsr_mask_copy(params, &idx,
1315                                        &tcp_mask->hdr.src_port,
1316                                        sizeof(tcp_mask->hdr.src_port));
1317                 ulp_rte_prsr_mask_copy(params, &idx,
1318                                        &tcp_mask->hdr.dst_port,
1319                                        sizeof(tcp_mask->hdr.dst_port));
1320                 ulp_rte_prsr_mask_copy(params, &idx,
1321                                        &tcp_mask->hdr.sent_seq,
1322                                        sizeof(tcp_mask->hdr.sent_seq));
1323                 ulp_rte_prsr_mask_copy(params, &idx,
1324                                        &tcp_mask->hdr.recv_ack,
1325                                        sizeof(tcp_mask->hdr.recv_ack));
1326                 ulp_rte_prsr_mask_copy(params, &idx,
1327                                        &tcp_mask->hdr.data_off,
1328                                        sizeof(tcp_mask->hdr.data_off));
1329                 ulp_rte_prsr_mask_copy(params, &idx,
1330                                        &tcp_mask->hdr.tcp_flags,
1331                                        sizeof(tcp_mask->hdr.tcp_flags));
1332                 ulp_rte_prsr_mask_copy(params, &idx,
1333                                        &tcp_mask->hdr.rx_win,
1334                                        sizeof(tcp_mask->hdr.rx_win));
1335                 ulp_rte_prsr_mask_copy(params, &idx,
1336                                        &tcp_mask->hdr.cksum,
1337                                        sizeof(tcp_mask->hdr.cksum));
1338                 ulp_rte_prsr_mask_copy(params, &idx,
1339                                        &tcp_mask->hdr.tcp_urp,
1340                                        sizeof(tcp_mask->hdr.tcp_urp));
1341         }
1342         /* add number of TCP header elements */
1343         params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1344
1345         /* Set the tcp header bitmap and computed l4 header bitmaps */
1346         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1347             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1348                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1349                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1350         } else {
1351                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1352                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1353         }
1354         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1355         return BNXT_TF_RC_SUCCESS;
1356 }
1357
1358 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1359 int32_t
1360 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1361                           struct ulp_rte_parser_params *params)
1362 {
1363         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1364         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1365         struct ulp_rte_hdr_field *field;
1366         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1367         uint32_t idx = params->field_idx;
1368         uint32_t size;
1369
1370         /*
1371          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1372          * header fields
1373          */
1374         if (vxlan_spec) {
1375                 size = sizeof(vxlan_spec->flags);
1376                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1377                                                 &vxlan_spec->flags,
1378                                                 size);
1379                 size = sizeof(vxlan_spec->rsvd0);
1380                 field = ulp_rte_parser_fld_copy(field,
1381                                                 &vxlan_spec->rsvd0,
1382                                                 size);
1383                 size = sizeof(vxlan_spec->vni);
1384                 field = ulp_rte_parser_fld_copy(field,
1385                                                 &vxlan_spec->vni,
1386                                                 size);
1387                 size = sizeof(vxlan_spec->rsvd1);
1388                 field = ulp_rte_parser_fld_copy(field,
1389                                                 &vxlan_spec->rsvd1,
1390                                                 size);
1391         }
1392         if (vxlan_mask) {
1393                 ulp_rte_prsr_mask_copy(params, &idx,
1394                                        &vxlan_mask->flags,
1395                                        sizeof(vxlan_mask->flags));
1396                 ulp_rte_prsr_mask_copy(params, &idx,
1397                                        &vxlan_mask->rsvd0,
1398                                        sizeof(vxlan_mask->rsvd0));
1399                 ulp_rte_prsr_mask_copy(params, &idx,
1400                                        &vxlan_mask->vni,
1401                                        sizeof(vxlan_mask->vni));
1402                 ulp_rte_prsr_mask_copy(params, &idx,
1403                                        &vxlan_mask->rsvd1,
1404                                        sizeof(vxlan_mask->rsvd1));
1405         }
1406         /* Add number of vxlan header elements */
1407         params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1408
1409         /* Update the hdr_bitmap with vxlan */
1410         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1411         return BNXT_TF_RC_SUCCESS;
1412 }
1413
1414 /* Function to handle the parsing of RTE Flow item void Header */
1415 int32_t
1416 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1417                          struct ulp_rte_parser_params *params __rte_unused)
1418 {
1419         return BNXT_TF_RC_SUCCESS;
1420 }
1421
1422 /* Function to handle the parsing of RTE Flow action void Header. */
1423 int32_t
1424 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1425                          struct ulp_rte_parser_params *params __rte_unused)
1426 {
1427         return BNXT_TF_RC_SUCCESS;
1428 }
1429
1430 /* Function to handle the parsing of RTE Flow action Mark Header. */
1431 int32_t
1432 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1433                          struct ulp_rte_parser_params *param)
1434 {
1435         const struct rte_flow_action_mark *mark;
1436         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1437         uint32_t mark_id;
1438
1439         mark = action_item->conf;
1440         if (mark) {
1441                 mark_id = tfp_cpu_to_be_32(mark->id);
1442                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1443                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1444
1445                 /* Update the act_bitmap with mark */
1446                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1447                 return BNXT_TF_RC_SUCCESS;
1448         }
1449         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1450         return BNXT_TF_RC_ERROR;
1451 }
1452
1453 /* Function to handle the parsing of RTE Flow action RSS Header. */
1454 int32_t
1455 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1456                         struct ulp_rte_parser_params *param)
1457 {
1458         const struct rte_flow_action_rss *rss = action_item->conf;
1459
1460         if (rss) {
1461                 /* Update the act_bitmap with rss */
1462                 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1463                 return BNXT_TF_RC_SUCCESS;
1464         }
1465         BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1466         return BNXT_TF_RC_ERROR;
1467 }
1468
1469 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1470 int32_t
1471 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1472                                 struct ulp_rte_parser_params *params)
1473 {
1474         const struct rte_flow_action_vxlan_encap *vxlan_encap;
1475         const struct rte_flow_item *item;
1476         const struct rte_flow_item_eth *eth_spec;
1477         const struct rte_flow_item_ipv4 *ipv4_spec;
1478         const struct rte_flow_item_ipv6 *ipv6_spec;
1479         struct rte_flow_item_vxlan vxlan_spec;
1480         uint32_t vlan_num = 0, vlan_size = 0;
1481         uint32_t ip_size = 0, ip_type = 0;
1482         uint32_t vxlan_size = 0;
1483         uint8_t *buff;
1484         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
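             /* defaults: ver/IHL 0x45, TTL 0x40 (64), protocol 0x11 (UDP) */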
1485         const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1486                                     0x00, 0x40, 0x11};
1487         /* IPv6 header per byte - vtc_flow, zero payload_len, next_hdr, hop_limit */
1488         const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
1489                                 0x00, 0x11, 0xf6};
1490         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1491         struct ulp_rte_act_prop *ap = &params->act_prop;
1492         const uint8_t *tmp_buff;
1493
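             /*
              * The encap definition is walked in order: ETH, up to two
              * optional VLAN tags, an IPv4 or IPv6 header, UDP and finally
              * VXLAN.  Each header is copied into the corresponding encap
              * action properties.
              */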
1494         vxlan_encap = action_item->conf;
1495         if (!vxlan_encap) {
1496                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1497                 return BNXT_TF_RC_ERROR;
1498         }
1499
1500         item = vxlan_encap->definition;
1501         if (!item) {
1502                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1503                 return BNXT_TF_RC_ERROR;
1504         }
1505
1506         if (!ulp_rte_item_skip_void(&item, 0))
1507                 return BNXT_TF_RC_ERROR;
1508
1509         /* must have ethernet header */
1510         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1511                 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1512                 return BNXT_TF_RC_ERROR;
1513         }
1514         eth_spec = item->spec;
1515         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1516         ulp_encap_buffer_copy(buff,
1517                               eth_spec->dst.addr_bytes,
1518                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
1519                               ULP_BUFFER_ALIGN_8_BYTE);
1520
1521         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1522         ulp_encap_buffer_copy(buff,
1523                               eth_spec->src.addr_bytes,
1524                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
1525                               ULP_BUFFER_ALIGN_8_BYTE);
1526
1527         /* Goto the next item */
1528         if (!ulp_rte_item_skip_void(&item, 1))
1529                 return BNXT_TF_RC_ERROR;
1530
1531         /* May have vlan header */
1532         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1533                 vlan_num++;
1534                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1535                 ulp_encap_buffer_copy(buff,
1536                                       item->spec,
1537                                       sizeof(struct rte_flow_item_vlan),
1538                                       ULP_BUFFER_ALIGN_8_BYTE);
1539
1540                 if (!ulp_rte_item_skip_void(&item, 1))
1541                         return BNXT_TF_RC_ERROR;
1542         }
1543
1544         /* may have two vlan headers */
1545         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1546                 vlan_num++;
1547                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1548                        sizeof(struct rte_flow_item_vlan)],
1549                        item->spec,
1550                        sizeof(struct rte_flow_item_vlan));
1551                 if (!ulp_rte_item_skip_void(&item, 1))
1552                         return BNXT_TF_RC_ERROR;
1553         }
1554         /* Update the vlan count and size if one or more tags were present */
1555         if (vlan_num) {
1556                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1557                 vlan_num = tfp_cpu_to_be_32(vlan_num);
1558                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1559                        &vlan_num,
1560                        sizeof(uint32_t));
1561                 vlan_size = tfp_cpu_to_be_32(vlan_size);
1562                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1563                        &vlan_size,
1564                        sizeof(uint32_t));
1565         }
1566
1567         /* L3 must be IPv4 or IPv6 */
1568         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1569                 ipv4_spec = item->spec;
1570                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1571
1572                 /* copy the ipv4 details */
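                     /*
                      * If version/IHL and TOS are left empty in the spec, the
                      * default IPv4 header above is used; otherwise the
                      * ID/frag/TTL/protocol and version/IHL/TOS portions of
                      * the spec are copied separately into the encap record.
                      */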
1573                 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1574                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1575                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1576                         ulp_encap_buffer_copy(buff,
1577                                               def_ipv4_hdr,
1578                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1579                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1580                                               ULP_BUFFER_ALIGN_8_BYTE);
1581                 } else {
1582                         /* The total length field is ignored in the ip hdr. */
1583                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1584                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1585                         ulp_encap_buffer_copy(buff,
1586                                               tmp_buff,
1587                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO,
1588                                               ULP_BUFFER_ALIGN_8_BYTE);
1589                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1590                              BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1591                         ulp_encap_buffer_copy(buff,
1592                                               &ipv4_spec->hdr.version_ihl,
1593                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
1594                                               ULP_BUFFER_ALIGN_8_BYTE);
1595                 }
1596
1597                 /* Update the dst ip address in ip encap buffer */
1598                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1599                     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1600                     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1601                 ulp_encap_buffer_copy(buff,
1602                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1603                                       sizeof(ipv4_spec->hdr.dst_addr),
1604                                       ULP_BUFFER_ALIGN_8_BYTE);
1605
1606                 /* Update the src ip address */
1607                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
1608                         BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
1609                         sizeof(ipv4_spec->hdr.src_addr)];
1610                 ulp_encap_buffer_copy(buff,
1611                                       (const uint8_t *)&ipv4_spec->hdr.src_addr,
1612                                       sizeof(ipv4_spec->hdr.src_addr),
1613                                       ULP_BUFFER_ALIGN_8_BYTE);
1614
1615                 /* Update the ip size details */
1616                 ip_size = tfp_cpu_to_be_32(ip_size);
1617                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1618                        &ip_size, sizeof(uint32_t));
1619
1620                 /* update the ip type */
1621                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1622                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1623                        &ip_type, sizeof(uint32_t));
1624
1625                 /* update the computed field to notify it is ipv4 header */
1626                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1627                                     1);
1628
1629                 if (!ulp_rte_item_skip_void(&item, 1))
1630                         return BNXT_TF_RC_ERROR;
1631         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1632                 ipv6_spec = item->spec;
1633                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1634
1635                 /* copy the ipv6 details */
1636                 tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1637                 if (ulp_buffer_is_empty(tmp_buff,
1638                                         BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
1639                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1640                         ulp_encap_buffer_copy(buff,
1641                                               def_ipv6_hdr,
1642                                               sizeof(def_ipv6_hdr),
1643                                               ULP_BUFFER_ALIGN_8_BYTE);
1644                 } else {
1645                         /* The payload length field is ignored in the ip hdr. */
1646                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1647                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
1648                         ulp_encap_buffer_copy(buff,
1649                                               tmp_buff,
1650                                               BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
1651                                               ULP_BUFFER_ALIGN_8_BYTE);
1652                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1653                                 BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
1654                                 BNXT_ULP_ENCAP_IPV6_DO];
1655                         tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
1656                         ulp_encap_buffer_copy(buff,
1657                                               tmp_buff,
1658                                               BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
1659                                               ULP_BUFFER_ALIGN_8_BYTE);
1660                 }
1661                 /* Update the dst ip address in ip encap buffer */
1662                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1663                         sizeof(def_ipv6_hdr)];
1664                 ulp_encap_buffer_copy(buff,
1665                                       (const uint8_t *)ipv6_spec->hdr.dst_addr,
1666                                       sizeof(ipv6_spec->hdr.dst_addr),
1667                                       ULP_BUFFER_ALIGN_8_BYTE);
1668
1669                 /* Update the src ip address */
1670                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1671                 ulp_encap_buffer_copy(buff,
1672                                       (const uint8_t *)ipv6_spec->hdr.src_addr,
1673                                       sizeof(ipv6_spec->hdr.src_addr),
1674                                       ULP_BUFFER_ALIGN_16_BYTE);
1675
1676                 /* Update the ip size details */
1677                 ip_size = tfp_cpu_to_be_32(ip_size);
1678                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1679                        &ip_size, sizeof(uint32_t));
1680
1681                 /* update the ip type */
1682                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1683                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1684                        &ip_type, sizeof(uint32_t));
1685
1686                 /* update the computed field to notify it is ipv6 header */
1687                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1688                                     1);
1689
1690                 if (!ulp_rte_item_skip_void(&item, 1))
1691                         return BNXT_TF_RC_ERROR;
1692         } else {
1693                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1694                 return BNXT_TF_RC_ERROR;
1695         }
1696
1697         /* L4 is UDP */
1698         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1699                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1700                 return BNXT_TF_RC_ERROR;
1701         }
1702         /* copy the udp details */
1703         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1704                               item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
1705                               ULP_BUFFER_ALIGN_8_BYTE);
1706
1707         if (!ulp_rte_item_skip_void(&item, 1))
1708                 return BNXT_TF_RC_ERROR;
1709
1710         /* Finally VXLAN */
1711         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1712                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1713                 return BNXT_TF_RC_ERROR;
1714         }
1715         vxlan_size = sizeof(struct rte_flow_item_vxlan);
1716         /* copy the vxlan details */
1717         memcpy(&vxlan_spec, item->spec, vxlan_size);
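             /* 0x08 sets the VXLAN "I" flag (valid VNI) per RFC 7348 */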
1718         vxlan_spec.flags = 0x08;
1719         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
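             /*
              * With an IPv4 outer header the 8-byte VXLAN header is copied as
              * one aligned block; with IPv6 it is copied as two 4-byte halves
              * (flags/rsvd0 first, then vni/rsvd1).
              */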
1720         if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
1721                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1722                                       vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
1723         } else {
1724                 ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
1725                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1726                 ulp_encap_buffer_copy(buff + (vxlan_size / 2),
1727                                       (const uint8_t *)&vxlan_spec.vni,
1728                                       vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
1729         }
1730         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1731         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1732                &vxlan_size, sizeof(uint32_t));
1733
1734         /* update the act_bitmap with vxlan encap */
1735         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1736         return BNXT_TF_RC_SUCCESS;
1737 }
1738
1739 /* Function to handle the parsing of RTE Flow action vxlan_decap Header */
1740 int32_t
1741 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1742                                 __rte_unused,
1743                                 struct ulp_rte_parser_params *params)
1744 {
1745         /* update the act_bitmap with vxlan decap */
1746         ULP_BITMAP_SET(params->act_bitmap.bits,
1747                        BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1748         /* Update computational field with tunnel decap info */
1749         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
1750         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
1751         return BNXT_TF_RC_SUCCESS;
1752 }
1753
1754 /* Function to handle the parsing of RTE Flow action drop Header. */
1755 int32_t
1756 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1757                          struct ulp_rte_parser_params *params)
1758 {
1759         /* Update the act_bitmap with drop */
1760         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1761         return BNXT_TF_RC_SUCCESS;
1762 }
1763
1764 /* Function to handle the parsing of RTE Flow action count. */
1765 int32_t
1766 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1767                           struct ulp_rte_parser_params *params)
1768
1769 {
1770         const struct rte_flow_action_count *act_count;
1771         struct ulp_rte_act_prop *act_prop = &params->act_prop;
1772
1773         act_count = action_item->conf;
1774         if (act_count) {
1775                 if (act_count->shared) {
1776                         BNXT_TF_DBG(ERR,
1777                                     "Parse Error:Shared count not supported\n");
1778                         return BNXT_TF_RC_PARSE_ERR;
1779                 }
1780                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1781                        &act_count->id,
1782                        BNXT_ULP_ACT_PROP_SZ_COUNT);
1783         }
1784
1785         /* Update the act_bitmap with count */
1786         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1787         return BNXT_TF_RC_SUCCESS;
1788 }
1789
1790 /* Function to handle the parsing of action ports. */
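     /*
      * For egress flows the destination is programmed as a vport; for
      * ingress flows it is programmed as the default VNIC of the destination
      * function (the VF function VNIC for VF representors, the driver
      * function VNIC otherwise).
      */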
1791 static int32_t
1792 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1793                             uint32_t ifindex)
1794 {
1795         enum bnxt_ulp_direction_type dir;
1796         uint16_t pid_s;
1797         uint32_t pid;
1798         struct ulp_rte_act_prop *act = &param->act_prop;
1799         enum bnxt_ulp_intf_type port_type;
1800         uint32_t vnic_type;
1801
1802         /* Get the direction */
1803         dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1804         if (dir == BNXT_ULP_DIR_EGRESS) {
1805                 /* For egress direction, fill vport */
1806                 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1807                         return BNXT_TF_RC_ERROR;
1808
1809                 pid = pid_s;
1810                 pid = rte_cpu_to_be_32(pid);
1811                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1812                        &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1813         } else {
1814                 /* For ingress direction, fill vnic */
1815                 port_type = ULP_COMP_FLD_IDX_RD(param,
1816                                                 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
1817                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
1818                         vnic_type = BNXT_ULP_VF_FUNC_VNIC;
1819                 else
1820                         vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
1821
1822                 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1823                                                  vnic_type, &pid_s))
1824                         return BNXT_TF_RC_ERROR;
1825
1826                 pid = pid_s;
1827                 pid = rte_cpu_to_be_32(pid);
1828                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1829                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1830         }
1831
1832         /* Update the action port set bit */
1833         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1834         return BNXT_TF_RC_SUCCESS;
1835 }
1836
1837 /* Function to handle the parsing of RTE Flow action PF. */
1838 int32_t
1839 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1840                        struct ulp_rte_parser_params *params)
1841 {
1842         uint32_t port_id;
1843         uint32_t ifindex;
1844         enum bnxt_ulp_intf_type intf_type;
1845
1846         /* Get the port id of the current device */
1847         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1848
1849         /* Get the port db ifindex */
1850         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1851                                               &ifindex)) {
1852                 BNXT_TF_DBG(ERR, "Invalid port id\n");
1853                 return BNXT_TF_RC_ERROR;
1854         }
1855
1856         /* Check the port is PF port */
1857         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1858         if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1859                 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1860                 return BNXT_TF_RC_ERROR;
1861         }
1862         /* Update the action properties */
1863         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1864         return ulp_rte_parser_act_port_set(params, ifindex);
1865 }
1866
1867 /* Function to handle the parsing of RTE Flow action VF. */
1868 int32_t
1869 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1870                        struct ulp_rte_parser_params *params)
1871 {
1872         const struct rte_flow_action_vf *vf_action;
1873         uint32_t ifindex;
1874         enum bnxt_ulp_intf_type intf_type;
1875
1876         vf_action = action_item->conf;
1877         if (!vf_action) {
1878                 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1879                 return BNXT_TF_RC_PARSE_ERR;
1880         }
1881
1882         if (vf_action->original) {
1883                 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
1884                 return BNXT_TF_RC_PARSE_ERR;
1885         }
1886
1887         /* Check the port is VF port */
1888         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1889                                                  &ifindex)) {
1890                 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
1891                 return BNXT_TF_RC_ERROR;
1892         }
1893         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1894         if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1895             intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1896                 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1897                 return BNXT_TF_RC_ERROR;
1898         }
1899
1900         /* Update the action properties */
1901         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1902         return ulp_rte_parser_act_port_set(params, ifindex);
1903 }
1904
1905 /* Function to handle the parsing of RTE Flow action port_id. */
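     /*
      * Illustrative example only (testpmd flow syntax, port ids assumed):
      *   flow create 0 ingress pattern eth / end actions port_id id 1 / end
      * redirects matching traffic to the DPDK port with id 1.
      */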
1906 int32_t
1907 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1908                             struct ulp_rte_parser_params *param)
1909 {
1910         const struct rte_flow_action_port_id *port_id = act_item->conf;
1911         uint32_t ifindex;
1912         enum bnxt_ulp_intf_type intf_type;
1913
1914         if (!port_id) {
1915                 BNXT_TF_DBG(ERR,
1916                             "ParseErr: Invalid Argument\n");
1917                 return BNXT_TF_RC_PARSE_ERR;
1918         }
1919         if (port_id->original) {
1920                 BNXT_TF_DBG(ERR,
1921                             "ParseErr:Portid Original not supported\n");
1922                 return BNXT_TF_RC_PARSE_ERR;
1923         }
1924
1925         /* Get the port db ifindex */
1926         if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1927                                               &ifindex)) {
1928                 BNXT_TF_DBG(ERR, "Invalid port id\n");
1929                 return BNXT_TF_RC_ERROR;
1930         }
1931
1932         /* Get the intf type */
1933         intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1934         if (!intf_type) {
1935                 BNXT_TF_DBG(ERR, "Invalid port type\n");
1936                 return BNXT_TF_RC_ERROR;
1937         }
1938
1939         /* Set the action port */
1940         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1941         return ulp_rte_parser_act_port_set(param, ifindex);
1942 }
1943
1944 /* Function to handle the parsing of RTE Flow action phy_port. */
1945 int32_t
1946 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1947                              struct ulp_rte_parser_params *prm)
1948 {
1949         const struct rte_flow_action_phy_port *phy_port;
1950         uint32_t pid;
1951         int32_t rc;
1952         uint16_t pid_s;
1953         enum bnxt_ulp_direction_type dir;
1954
1955         phy_port = action_item->conf;
1956         if (!phy_port) {
1957                 BNXT_TF_DBG(ERR,
1958                             "ParseErr: Invalid Argument\n");
1959                 return BNXT_TF_RC_PARSE_ERR;
1960         }
1961
1962         if (phy_port->original) {
1963                 BNXT_TF_DBG(ERR,
1964                             "Parse Err:Port Original not supported\n");
1965                 return BNXT_TF_RC_PARSE_ERR;
1966         }
1967         dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1968         if (dir != BNXT_ULP_DIR_EGRESS) {
1969                 BNXT_TF_DBG(ERR,
1970                             "Parse Err:Phy ports are valid only for egress\n");
1971                 return BNXT_TF_RC_PARSE_ERR;
1972         }
1973         /* Get the physical port details from port db */
1974         rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1975                                             &pid_s);
1976         if (rc) {
1977                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1978                 return -EINVAL;
1979         }
1980
1981         pid = pid_s;
1982         pid = rte_cpu_to_be_32(pid);
1983         memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1984                &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1985
1986         /* Update the action port set bit */
1987         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1988         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1989                             BNXT_ULP_INTF_TYPE_PHY_PORT);
1990         return BNXT_TF_RC_SUCCESS;
1991 }
1992
1993 /* Function to handle the parsing of RTE Flow action pop vlan. */
1994 int32_t
1995 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1996                                 struct ulp_rte_parser_params *params)
1997 {
1998         /* Update the act_bitmap with pop */
1999         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
2000         return BNXT_TF_RC_SUCCESS;
2001 }
2002
2003 /* Function to handle the parsing of RTE Flow action push vlan. */
2004 int32_t
2005 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2006                                  struct ulp_rte_parser_params *params)
2007 {
2008         const struct rte_flow_action_of_push_vlan *push_vlan;
2009         uint16_t ethertype;
2010         struct ulp_rte_act_prop *act = &params->act_prop;
2011
2012         push_vlan = action_item->conf;
2013         if (push_vlan) {
2014                 ethertype = push_vlan->ethertype;
2015                 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2016                         BNXT_TF_DBG(ERR,
2017                                     "Parse Err: Ethertype not supported\n");
2018                         return BNXT_TF_RC_PARSE_ERR;
2019                 }
2020                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2021                        &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2022                 /* Update the act_bitmap with push vlan */
2023                 ULP_BITMAP_SET(params->act_bitmap.bits,
2024                                BNXT_ULP_ACTION_BIT_PUSH_VLAN);
2025                 return BNXT_TF_RC_SUCCESS;
2026         }
2027         BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2028         return BNXT_TF_RC_ERROR;
2029 }
2030
2031 /* Function to handle the parsing of RTE Flow action set vlan id. */
2032 int32_t
2033 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2034                                     struct ulp_rte_parser_params *params)
2035 {
2036         const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2037         uint32_t vid;
2038         struct ulp_rte_act_prop *act = &params->act_prop;
2039
2040         vlan_vid = action_item->conf;
2041         if (vlan_vid && vlan_vid->vlan_vid) {
2042                 vid = vlan_vid->vlan_vid;
2043                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2044                        &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2045                 /* Update the act_bitmap with set vlan vid */
2046                 ULP_BITMAP_SET(params->act_bitmap.bits,
2047                                BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
2048                 return BNXT_TF_RC_SUCCESS;
2049         }
2050         BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2051         return BNXT_TF_RC_ERROR;
2052 }
2053
2054 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2055 int32_t
2056 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2057                                     struct ulp_rte_parser_params *params)
2058 {
2059         const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2060         uint8_t pcp;
2061         struct ulp_rte_act_prop *act = &params->act_prop;
2062
2063         vlan_pcp = action_item->conf;
2064         if (vlan_pcp) {
2065                 pcp = vlan_pcp->vlan_pcp;
2066                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2067                        &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2068                 /* Update the act_bitmap with set vlan pcp */
2069                 ULP_BITMAP_SET(params->act_bitmap.bits,
2070                                BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
2071                 return BNXT_TF_RC_SUCCESS;
2072         }
2073         BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2074         return BNXT_TF_RC_ERROR;
2075 }
2076
2077 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2078 int32_t
2079 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2080                                  struct ulp_rte_parser_params *params)
2081 {
2082         const struct rte_flow_action_set_ipv4 *set_ipv4;
2083         struct ulp_rte_act_prop *act = &params->act_prop;
2084
2085         set_ipv4 = action_item->conf;
2086         if (set_ipv4) {
2087                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2088                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2089                 /* Update the act_bitmap with set ipv4 src */
2090                 ULP_BITMAP_SET(params->act_bitmap.bits,
2091                                BNXT_ULP_ACTION_BIT_SET_IPV4_SRC);
2092                 return BNXT_TF_RC_SUCCESS;
2093         }
2094         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2095         return BNXT_TF_RC_ERROR;
2096 }
2097
2098 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2099 int32_t
2100 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2101                                  struct ulp_rte_parser_params *params)
2102 {
2103         const struct rte_flow_action_set_ipv4 *set_ipv4;
2104         struct ulp_rte_act_prop *act = &params->act_prop;
2105
2106         set_ipv4 = action_item->conf;
2107         if (set_ipv4) {
2108                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2109                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2110                 /* Update the act_bitmap with set ipv4 dst */
2111                 ULP_BITMAP_SET(params->act_bitmap.bits,
2112                                BNXT_ULP_ACTION_BIT_SET_IPV4_DST);
2113                 return BNXT_TF_RC_SUCCESS;
2114         }
2115         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2116         return BNXT_TF_RC_ERROR;
2117 }
2118
2119 /* Function to handle the parsing of RTE Flow action set tp src.*/
2120 int32_t
2121 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2122                                struct ulp_rte_parser_params *params)
2123 {
2124         const struct rte_flow_action_set_tp *set_tp;
2125         struct ulp_rte_act_prop *act = &params->act_prop;
2126
2127         set_tp = action_item->conf;
2128         if (set_tp) {
2129                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2130                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2131                 /* Update the act_bitmap with set tp src */
2132                 ULP_BITMAP_SET(params->act_bitmap.bits,
2133                                BNXT_ULP_ACTION_BIT_SET_TP_SRC);
2134                 return BNXT_TF_RC_SUCCESS;
2135         }
2136
2137         BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2138         return BNXT_TF_RC_ERROR;
2139 }
2140
2141 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2142 int32_t
2143 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2144                                struct ulp_rte_parser_params *params)
2145 {
2146         const struct rte_flow_action_set_tp *set_tp;
2147         struct ulp_rte_act_prop *act = &params->act_prop;
2148
2149         set_tp = action_item->conf;
2150         if (set_tp) {
2151                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2152                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2153                 /* Update the act_bitmap with set tp dst */
2154                 ULP_BITMAP_SET(params->act_bitmap.bits,
2155                                BNXT_ULP_ACTION_BIT_SET_TP_DST);
2156                 return BNXT_TF_RC_SUCCESS;
2157         }
2158
2159         BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
2160         return BNXT_TF_RC_ERROR;
2161 }
2162
2163 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2164 int32_t
2165 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2166                             struct ulp_rte_parser_params *params)
2167 {
2168         /* Update the act_bitmap with dec ttl */
2169         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
2170         return BNXT_TF_RC_SUCCESS;
2171 }
2172
2173 /* Function to handle the parsing of RTE Flow action JUMP */
2174 int32_t
2175 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2176                             struct ulp_rte_parser_params *params)
2177 {
2178         /* Update the act_bitmap with jump */
2179         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
2180         return BNXT_TF_RC_SUCCESS;
2181 }