drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2021 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_ulp.h"
10 #include "bnxt_tf_common.h"
11 #include "bnxt_tf_pmd_shim.h"
12 #include "ulp_rte_parser.h"
13 #include "ulp_matcher.h"
14 #include "ulp_utils.h"
15 #include "tfp.h"
16 #include "ulp_port_db.h"
17 #include "ulp_flow_db.h"
18 #include "ulp_mapper.h"
19 #include "ulp_tun.h"
20 #include "ulp_template_db_tbl.h"
21
22 /* Local defines for the parsing functions */
23 #define ULP_VLAN_PRIORITY_SHIFT         13 /* First 3 bits */
24 #define ULP_VLAN_PRIORITY_MASK          0x700
25 #define ULP_VLAN_TAG_MASK               0xFFF /* Last 12 bits */
26 #define ULP_UDP_PORT_VXLAN              4789
27
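/*
 * Illustrative example (values chosen for illustration only) of how the
 * defines above split a 16-bit 802.1Q TCI taken in CPU byte order, e.g.
 * for TCI 0xA00A:
 *
 *   priority = 0xA00A >> ULP_VLAN_PRIORITY_SHIFT;    yields 0x5 (PCP)
 *   vlan_id  = 0xA00A & ULP_VLAN_TAG_MASK;           yields 0x00A (VID)
 *
 * ULP_UDP_PORT_VXLAN (4789) is the IANA-assigned VXLAN UDP port checked in
 * ulp_rte_l4_proto_type_update() to flag tunnel flows.
 */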
28 /* Utility function to skip the void items. */
29 static inline int32_t
30 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
31 {
32         if (!*item)
33                 return 0;
34         if (increment)
35                 (*item)++;
36         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
37                 (*item)++;
38         if (*item)
39                 return 1;
40         return 0;
41 }
42
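/*
 * Usage sketch for the helper above (illustrative only; process() is a
 * placeholder, not a function in this file): walk a pattern list while
 * transparently skipping RTE_FLOW_ITEM_TYPE_VOID entries.
 *
 *   const struct rte_flow_item *it = pattern;
 *
 *   if (!ulp_rte_item_skip_void(&it, 0))
 *           return;                          nothing but VOID items
 *   while (it->type != RTE_FLOW_ITEM_TYPE_END) {
 *           process(it);                     handle the non-void item
 *           ulp_rte_item_skip_void(&it, 1);  step and skip VOID items
 *   }
 */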
43 /* Utility function to copy field spec items */
44 static struct ulp_rte_hdr_field *
45 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
46                         const void *buffer,
47                         uint32_t size)
48 {
49         field->size = size;
50         memcpy(field->spec, buffer, field->size);
51         field++;
52         return field;
53 }
54
55 /* Utility function to update the field_bitmap */
56 static void
57 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
58                                    uint32_t idx,
59                                    enum bnxt_ulp_prsr_action prsr_act)
60 {
61         struct ulp_rte_hdr_field *field;
62
63         field = &params->hdr_field[idx];
64         if (ulp_bitmap_notzero(field->mask, field->size)) {
65                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
66                 if (!(prsr_act & ULP_PRSR_ACT_MATCH_IGNORE))
67                         ULP_INDEX_BITMAP_SET(params->fld_s_bitmap.bits, idx);
68                 /* Not exact match */
69                 if (!ulp_bitmap_is_ones(field->mask, field->size))
70                         ULP_COMP_FLD_IDX_WR(params,
71                                             BNXT_ULP_CF_IDX_WC_MATCH, 1);
72         } else {
73                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
74         }
75 }
76
77 #define ulp_deference_struct(x, y) ((x) ? &((x)->y) : NULL)
78 /* Utility function to copy field spec and masks items */
79 static void
80 ulp_rte_prsr_fld_mask(struct ulp_rte_parser_params *params,
81                       uint32_t *idx,
82                       uint32_t size,
83                       const void *spec_buff,
84                       const void *mask_buff,
85                       enum bnxt_ulp_prsr_action prsr_act)
86 {
87         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
88
89         /* update the field size */
90         field->size = size;
91
92         /* copy the mask specifications only if mask is not null */
93         if (!(prsr_act & ULP_PRSR_ACT_MASK_IGNORE) && mask_buff) {
94                 memcpy(field->mask, mask_buff, size);
95                 ulp_rte_parser_field_bitmap_update(params, *idx, prsr_act);
96         }
97
98         /* copy the spec only if spec and mask are valid and mask is nonzero */
99         if (spec_buff && mask_buff && ulp_bitmap_notzero(mask_buff, size))
100                 memcpy(field->spec, spec_buff, size);
101
102         /* Increment the index */
103         *idx = *idx + 1;
104 }
105
106 /* Utility function to validate the size and reserve hdr_field indices */
107 static int32_t
108 ulp_rte_prsr_fld_size_validate(struct ulp_rte_parser_params *params,
109                                uint32_t *idx,
110                                uint32_t size)
111 {
112         if (params->field_idx + size >= BNXT_ULP_PROTO_HDR_MAX) {
113                 BNXT_TF_DBG(ERR, "OOB for field processing %u\n", *idx);
114                 return -EINVAL;
115         }
116         *idx = params->field_idx;
117         params->field_idx += size;
118         return 0;
119 }
120
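/*
 * Sketch of the reserve-then-fill pattern the protocol handlers below
 * follow (illustrative; 'spec' and 'mask' stand for the item's spec and
 * mask pointers): first reserve the header's slots in hdr_field, then copy
 * each field's spec/mask pair, letting the index auto-increment.
 *
 *   uint32_t idx = 0, size;
 *
 *   if (ulp_rte_prsr_fld_size_validate(params, &idx,
 *                                      BNXT_ULP_PROTO_HDR_ETH_NUM))
 *           return BNXT_TF_RC_ERROR;        hdr_field space exhausted
 *   size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
 *   ulp_rte_prsr_fld_mask(params, &idx, size, spec, mask,
 *                         ULP_PRSR_ACT_DEFAULT);
 */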
121 /*
122  * Function to handle the parsing of RTE Flows and placing
123  * the RTE flow items into the ulp structures.
124  */
125 int32_t
126 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
127                               struct ulp_rte_parser_params *params)
128 {
129         const struct rte_flow_item *item = pattern;
130         struct bnxt_ulp_rte_hdr_info *hdr_info;
131
132         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
133
134         /* Set the computed flags for no vlan tags before parsing */
135         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
136         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
137
138         /* Parse all the items in the pattern */
139         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
140                 if (item->type >= (uint32_t)
141                     BNXT_RTE_FLOW_ITEM_TYPE_END) {
142                         if (item->type >=
143                             (uint32_t)BNXT_RTE_FLOW_ITEM_TYPE_LAST)
144                                 goto hdr_parser_error;
145                         /* get the header information */
146                         hdr_info = &ulp_vendor_hdr_info[item->type -
147                                 BNXT_RTE_FLOW_ITEM_TYPE_END];
148                 } else {
149                         if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
150                                 goto hdr_parser_error;
151                         hdr_info = &ulp_hdr_info[item->type];
152                 }
153                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
154                         goto hdr_parser_error;
155                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
156                         /* call the registered callback handler */
157                         if (hdr_info->proto_hdr_func) {
158                                 if (hdr_info->proto_hdr_func(item, params) !=
159                                     BNXT_TF_RC_SUCCESS) {
160                                         return BNXT_TF_RC_ERROR;
161                                 }
162                         }
163                 }
164                 item++;
165         }
166         /* update the implied SVIF */
167         return ulp_rte_parser_implicit_match_port_process(params);
168
169 hdr_parser_error:
170         BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
171                     item->type);
172         return BNXT_TF_RC_PARSE_ERR;
173 }
174
175 /*
176  * Function to handle the parsing of RTE Flows and placing
177  * the RTE flow actions into the ulp structures.
178  */
179 int32_t
180 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
181                               struct ulp_rte_parser_params *params)
182 {
183         const struct rte_flow_action *action_item = actions;
184         struct bnxt_ulp_rte_act_info *hdr_info;
185
186         /* Parse all the actions in the action list */
187         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
188                 if (action_item->type >=
189                     (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_END) {
190                         if (action_item->type >=
191                             (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_LAST)
192                                 goto act_parser_error;
193                         /* get the action info from the bnxt vendor act info table */
194                         hdr_info = &ulp_vendor_act_info[action_item->type -
195                                 BNXT_RTE_FLOW_ACTION_TYPE_END];
196                 } else {
197                         if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
198                                 goto act_parser_error;
199                         /* get the action info from the act info table */
200                         hdr_info = &ulp_act_info[action_item->type];
201                 }
202                 if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
203                         goto act_parser_error;
204                 } else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
205                         /* call the registered callback handler */
206                         if (hdr_info->proto_act_func) {
207                                 if (hdr_info->proto_act_func(action_item,
208                                                              params) !=
209                                     BNXT_TF_RC_SUCCESS) {
210                                         return BNXT_TF_RC_ERROR;
211                                 }
212                         }
213                 }
214                 action_item++;
215         }
216         /* update the implied port details */
217         ulp_rte_parser_implicit_act_port_process(params);
218         return BNXT_TF_RC_SUCCESS;
219
220 act_parser_error:
221         BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
222                     action_item->type);
223         return BNXT_TF_RC_ERROR;
224 }
225
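/*
 * Call-flow sketch (illustrative only; the actual flow-create path also
 * allocates the fid and runs template matching): both parsers operate on
 * the same params before post processing.
 *
 *   struct ulp_rte_parser_params params = { 0 };
 *
 *   params.ulp_ctx = ulp_ctx;
 *   if (bnxt_ulp_rte_parser_hdr_parse(pattern, &params) !=
 *       BNXT_TF_RC_SUCCESS ||
 *       bnxt_ulp_rte_parser_act_parse(actions, &params) !=
 *       BNXT_TF_RC_SUCCESS)
 *           return -EINVAL;
 *   bnxt_ulp_rte_parser_post_process(&params);
 */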
226 /*
227  * Function to handle the post processing of the computed
228  * fields for the interface.
229  */
230 static void
231 bnxt_ulp_comp_fld_intf_update(struct ulp_rte_parser_params *params)
232 {
233         uint32_t ifindex;
234         uint16_t port_id, parif;
235         uint32_t mtype;
236         enum bnxt_ulp_direction_type dir;
237
238         /* get the direction details */
239         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
240
241         /* read the port id details */
242         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
243         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
244                                               port_id,
245                                               &ifindex)) {
246                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
247                 return;
248         }
249
250         if (dir == BNXT_ULP_DIR_INGRESS) {
251                 /* Set port PARIF */
252                 if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
253                                           BNXT_ULP_PHY_PORT_PARIF, &parif)) {
254                         BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
255                         return;
256                 }
257                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
258                                     parif);
259         } else {
260                 /* Get the match port type */
261                 mtype = ULP_COMP_FLD_IDX_RD(params,
262                                             BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
263                 if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
264                         ULP_COMP_FLD_IDX_WR(params,
265                                             BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
266                                             1);
267                         /* Set VF func PARIF */
268                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
269                                                   BNXT_ULP_VF_FUNC_PARIF,
270                                                   &parif)) {
271                                 BNXT_TF_DBG(ERR,
272                                             "ParseErr:ifindex is not valid\n");
273                                 return;
274                         }
275                         ULP_COMP_FLD_IDX_WR(params,
276                                             BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
277                                             parif);
278
279                 } else {
280                         /* Set DRV func PARIF */
281                         if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
282                                                   BNXT_ULP_DRV_FUNC_PARIF,
283                                                   &parif)) {
284                                 BNXT_TF_DBG(ERR,
285                                             "ParseErr:ifindex is not valid\n");
286                                 return;
287                         }
288                         ULP_COMP_FLD_IDX_WR(params,
289                                             BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
290                                             parif);
291                 }
292                 if (mtype == BNXT_ULP_INTF_TYPE_PF) {
293                         ULP_COMP_FLD_IDX_WR(params,
294                                             BNXT_ULP_CF_IDX_MATCH_PORT_IS_PF,
295                                             1);
296                 }
297         }
298 }
299
300 static int32_t
301 ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
302 {
303         enum bnxt_ulp_intf_type match_port_type, act_port_type;
304         enum bnxt_ulp_direction_type dir;
305         uint32_t act_port_set;
306
307         /* Get the computed details */
308         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
309         match_port_type = ULP_COMP_FLD_IDX_RD(params,
310                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
311         act_port_type = ULP_COMP_FLD_IDX_RD(params,
312                                             BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
313         act_port_set = ULP_COMP_FLD_IDX_RD(params,
314                                            BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
315
316         /* set the flow direction in the proto and action header */
317         if (dir == BNXT_ULP_DIR_EGRESS) {
318                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
319                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
320                 ULP_BITMAP_SET(params->act_bitmap.bits,
321                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
322         }
323
324         /* calculate the VF to VF flag */
325         if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
326             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
327                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
328
329         /* Update the decrement ttl computational fields */
330         if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
331                              BNXT_ULP_ACT_BIT_DEC_TTL)) {
332                 /*
333                  * If the vxlan header is present and the vxlan decap
334                  * action is not set, decrement the tunnel ttl.
335                  * Similarly add GRE and NVGRE in the future.
336                  */
337                 if ((ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
338                                       BNXT_ULP_HDR_BIT_T_VXLAN) &&
339                     !ULP_BITMAP_ISSET(params->act_bitmap.bits,
340                                       BNXT_ULP_ACT_BIT_VXLAN_DECAP))) {
341                         ULP_COMP_FLD_IDX_WR(params,
342                                             BNXT_ULP_CF_IDX_ACT_T_DEC_TTL, 1);
343                 } else {
344                         ULP_COMP_FLD_IDX_WR(params,
345                                             BNXT_ULP_CF_IDX_ACT_DEC_TTL, 1);
346                 }
347         }
348
349         /* Merge the hdr_fp_bit into the proto header bit */
350         params->hdr_bitmap.bits |= params->hdr_fp_bit.bits;
351
352         /* Update the comp fld fid */
353         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_FID, params->fid);
354
355         /* Update the computed interface parameters */
356         bnxt_ulp_comp_fld_intf_update(params);
357
358         /* TBD: Handle the flow rejection scenarios */
359         return 0;
360 }
361
362 /*
363  * Function to handle the post processing of the parsing details
364  */
365 void
366 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
367 {
368         ulp_post_process_normal_flow(params);
369 }
370
371 /*
372  * Function to compute the flow direction based on the match port details
373  */
374 static void
375 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
376 {
377         enum bnxt_ulp_intf_type match_port_type;
378
379         /* Get the match port type */
380         match_port_type = ULP_COMP_FLD_IDX_RD(params,
381                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
382
383         /* If ingress flow and match port is vf rep then dir is egress */
384         if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
385             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
386                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
387                                     BNXT_ULP_DIR_EGRESS);
388         } else {
389                 /* Assign the input direction */
390                 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
391                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
392                                             BNXT_ULP_DIR_INGRESS);
393                 else
394                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
395                                             BNXT_ULP_DIR_EGRESS);
396         }
397 }
398
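/*
 * Summary of the direction computed above (derived from the function;
 * illustrative):
 *
 *   flow attribute | match port type   | computed direction
 *   ingress        | VF representor    | BNXT_ULP_DIR_EGRESS
 *   ingress        | any other type    | BNXT_ULP_DIR_INGRESS
 *   egress         | any type          | BNXT_ULP_DIR_EGRESS
 */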
399 /* Function to set the SVIF in the parser params for the given interface */
400 static int32_t
401 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
402                         uint32_t ifindex,
403                         uint16_t mask)
404 {
405         uint16_t svif;
406         enum bnxt_ulp_direction_type dir;
407         struct ulp_rte_hdr_field *hdr_field;
408         enum bnxt_ulp_svif_type svif_type;
409         enum bnxt_ulp_intf_type port_type;
410
411         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
412             BNXT_ULP_INVALID_SVIF_VAL) {
413                 BNXT_TF_DBG(ERR,
414                             "SVIF already set, multiple sources not supported\n");
415                 return BNXT_TF_RC_ERROR;
416         }
417
418         /* Get port type details */
419         port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
420         if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
421                 BNXT_TF_DBG(ERR, "Invalid port type\n");
422                 return BNXT_TF_RC_ERROR;
423         }
424
425         /* Update the match port type */
426         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
427
428         /* compute the direction */
429         bnxt_ulp_rte_parser_direction_compute(params);
430
431         /* Get the computed direction */
432         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
433         if (dir == BNXT_ULP_DIR_INGRESS) {
434                 svif_type = BNXT_ULP_PHY_PORT_SVIF;
435         } else {
436                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
437                         svif_type = BNXT_ULP_VF_FUNC_SVIF;
438                 else
439                         svif_type = BNXT_ULP_DRV_FUNC_SVIF;
440         }
441         ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
442                              &svif);
443         svif = rte_cpu_to_be_16(svif);
444         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
445         memcpy(hdr_field->spec, &svif, sizeof(svif));
446         memcpy(hdr_field->mask, &mask, sizeof(mask));
447         hdr_field->size = sizeof(svif);
448         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
449                             rte_be_to_cpu_16(svif));
450         return BNXT_TF_RC_SUCCESS;
451 }
452
453 /* Function to handle the implicit match port of the RTE flow */
454 int32_t
455 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
456 {
457         uint16_t port_id = 0;
458         uint16_t svif_mask = 0xFFFF;
459         uint32_t ifindex;
460         int32_t rc = BNXT_TF_RC_ERROR;
461
462         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
463             BNXT_ULP_INVALID_SVIF_VAL)
464                 return BNXT_TF_RC_SUCCESS;
465
466         /* SVIF not set. So get the port id */
467         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
468
469         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
470                                               port_id,
471                                               &ifindex)) {
472                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
473                 return rc;
474         }
475
476         /* Update the SVIF details */
477         rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
478         return rc;
479 }
480
481 /* Function to handle the implicit action port id */
482 int32_t
483 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
484 {
485         struct rte_flow_action action_item = {0};
486         struct rte_flow_action_port_id port_id = {0};
487
488         /* Read the action port set bit */
489         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
490                 /* Already set, so just exit */
491                 return BNXT_TF_RC_SUCCESS;
492         }
493         port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
494         action_item.conf = &port_id;
495
496         /* Update the action port based on incoming port */
497         ulp_rte_port_id_act_handler(&action_item, params);
498
499         /* Reset the action port set bit */
500         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
501         return BNXT_TF_RC_SUCCESS;
502 }
503
504 /* Function to handle the parsing of RTE Flow item PF Header. */
505 int32_t
506 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
507                        struct ulp_rte_parser_params *params)
508 {
509         uint16_t port_id = 0;
510         uint16_t svif_mask = 0xFFFF;
511         uint32_t ifindex;
512
513         /* Get the implicit port id */
514         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
515
516         /* perform the conversion from dpdk port to bnxt ifindex */
517         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
518                                               port_id,
519                                               &ifindex)) {
520                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
521                 return BNXT_TF_RC_ERROR;
522         }
523
524         /* Update the SVIF details */
525         return  ulp_rte_parser_svif_set(params, ifindex, svif_mask);
526 }
527
528 /* Function to handle the parsing of RTE Flow item VF Header. */
529 int32_t
530 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
531                        struct ulp_rte_parser_params *params)
532 {
533         const struct rte_flow_item_vf *vf_spec = item->spec;
534         const struct rte_flow_item_vf *vf_mask = item->mask;
535         uint16_t mask = 0;
536         uint32_t ifindex;
537         int32_t rc = BNXT_TF_RC_PARSE_ERR;
538
539         /* Get VF rte_flow_item for Port details */
540         if (!vf_spec) {
541                 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
542                 return rc;
543         }
544         if (!vf_mask) {
545                 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
546                 return rc;
547         }
548         mask = vf_mask->id;
549
550         /* perform the conversion from VF Func id to bnxt ifindex */
551         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
552                                                  vf_spec->id,
553                                                  &ifindex)) {
554                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
555                 return rc;
556         }
557         /* Update the SVIF details */
558         return ulp_rte_parser_svif_set(params, ifindex, mask);
559 }
560
561 /* Function to handle the parsing of RTE Flow item port id Header. */
562 int32_t
563 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
564                             struct ulp_rte_parser_params *params)
565 {
566         const struct rte_flow_item_port_id *port_spec = item->spec;
567         const struct rte_flow_item_port_id *port_mask = item->mask;
568         uint16_t mask = 0;
569         int32_t rc = BNXT_TF_RC_PARSE_ERR;
570         uint32_t ifindex;
571
572         if (!port_spec) {
573                 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
574                 return rc;
575         }
576         if (!port_mask) {
577                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
578                 return rc;
579         }
580         mask = port_mask->id;
581
582         /* perform the conversion from dpdk port to bnxt ifindex */
583         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
584                                               port_spec->id,
585                                               &ifindex)) {
586                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
587                 return rc;
588         }
589         /* Update the SVIF details */
590         return ulp_rte_parser_svif_set(params, ifindex, mask);
591 }
592
593 /* Function to handle the parsing of RTE Flow item phy port Header. */
594 int32_t
595 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
596                              struct ulp_rte_parser_params *params)
597 {
598         const struct rte_flow_item_phy_port *port_spec = item->spec;
599         const struct rte_flow_item_phy_port *port_mask = item->mask;
600         uint16_t mask = 0;
601         int32_t rc = BNXT_TF_RC_ERROR;
602         uint16_t svif;
603         enum bnxt_ulp_direction_type dir;
604         struct ulp_rte_hdr_field *hdr_field;
605
606         /* Copy the rte_flow_item for phy port into hdr_field */
607         if (!port_spec) {
608                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
609                 return rc;
610         }
611         if (!port_mask) {
612                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
613                 return rc;
614         }
615         mask = port_mask->index;
616
617         /* Update the match port type */
618         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
619                             BNXT_ULP_INTF_TYPE_PHY_PORT);
620
621         /* Compute the Hw direction */
622         bnxt_ulp_rte_parser_direction_compute(params);
623
624         /* Direction validation */
625         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
626         if (dir == BNXT_ULP_DIR_EGRESS) {
627                 BNXT_TF_DBG(ERR,
628                             "Parse Err:Phy ports are valid only for ingress\n");
629                 return BNXT_TF_RC_PARSE_ERR;
630         }
631
632         /* Get the physical port details from port db */
633         rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
634                                            &svif);
635         if (rc) {
636                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
637                 return BNXT_TF_RC_PARSE_ERR;
638         }
639
640         /* Update the SVIF details */
641         svif = rte_cpu_to_be_16(svif);
642         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
643         memcpy(hdr_field->spec, &svif, sizeof(svif));
644         memcpy(hdr_field->mask, &mask, sizeof(mask));
645         hdr_field->size = sizeof(svif);
646         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
647                             rte_be_to_cpu_16(svif));
648         return BNXT_TF_RC_SUCCESS;
649 }
650
651 /* Function to handle the update of proto header based on field values */
652 static void
653 ulp_rte_l2_proto_type_update(struct ulp_rte_parser_params *param,
654                              uint16_t type, uint32_t in_flag)
655 {
656         if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV4)) {
657                 if (in_flag) {
658                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
659                                        BNXT_ULP_HDR_BIT_I_IPV4);
660                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
661                 } else {
662                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
663                                        BNXT_ULP_HDR_BIT_O_IPV4);
664                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
665                 }
666         } else if (type == tfp_cpu_to_be_16(RTE_ETHER_TYPE_IPV6)) {
667                 if (in_flag) {
668                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
669                                        BNXT_ULP_HDR_BIT_I_IPV6);
670                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L3, 1);
671                 } else {
672                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
673                                        BNXT_ULP_HDR_BIT_O_IPV6);
674                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L3, 1);
675                 }
676         }
677 }
678
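/*
 * Example (illustrative): an outer ethernet item whose type is the IPv4
 * ethertype makes the helper above set BNXT_ULP_HDR_BIT_O_IPV4 in
 * hdr_fp_bit and BNXT_ULP_CF_IDX_O_L3 to 1:
 *
 *   ulp_rte_l2_proto_type_update(params, RTE_BE16(RTE_ETHER_TYPE_IPV4), 0);
 */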
679 /* Internal Function to identify broadcast or multicast packets */
680 static int32_t
681 ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
682 {
683         if (rte_is_multicast_ether_addr(eth_addr) ||
684             rte_is_broadcast_ether_addr(eth_addr)) {
685                 BNXT_TF_DBG(DEBUG,
686                             "No support for bcast or mcast addr offload\n");
687                 return 1;
688         }
689         return 0;
690 }
691
692 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
693 int32_t
694 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
695                         struct ulp_rte_parser_params *params)
696 {
697         const struct rte_flow_item_eth *eth_spec = item->spec;
698         const struct rte_flow_item_eth *eth_mask = item->mask;
699         uint32_t idx = 0, dmac_idx = 0;
700         uint32_t size;
701         uint16_t eth_type = 0;
702         uint32_t inner_flag = 0;
703
704         /* Perform validations */
705         if (eth_spec) {
706                 /* Todo: workaround to avoid multicast and broadcast addr */
707                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->dst))
708                         return BNXT_TF_RC_PARSE_ERR;
709
710                 if (ulp_rte_parser_is_bcmc_addr(&eth_spec->src))
711                         return BNXT_TF_RC_PARSE_ERR;
712
713                 eth_type = eth_spec->type;
714         }
715
716         if (ulp_rte_prsr_fld_size_validate(params, &idx,
717                                            BNXT_ULP_PROTO_HDR_ETH_NUM)) {
718                 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
719                 return BNXT_TF_RC_ERROR;
720         }
721         /*
722          * Copy the rte_flow_item for eth into hdr_field using ethernet
723          * header fields
724          */
725         dmac_idx = idx;
726         size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
727         ulp_rte_prsr_fld_mask(params, &idx, size,
728                               ulp_deference_struct(eth_spec, dst.addr_bytes),
729                               ulp_deference_struct(eth_mask, dst.addr_bytes),
730                               ULP_PRSR_ACT_DEFAULT);
731
732         size = sizeof(((struct rte_flow_item_eth *)NULL)->src.addr_bytes);
733         ulp_rte_prsr_fld_mask(params, &idx, size,
734                               ulp_deference_struct(eth_spec, src.addr_bytes),
735                               ulp_deference_struct(eth_mask, src.addr_bytes),
736                               ULP_PRSR_ACT_DEFAULT);
737
738         size = sizeof(((struct rte_flow_item_eth *)NULL)->type);
739         ulp_rte_prsr_fld_mask(params, &idx, size,
740                               ulp_deference_struct(eth_spec, type),
741                               ulp_deference_struct(eth_mask, type),
742                               ULP_PRSR_ACT_MATCH_IGNORE);
743
744         /* Update the protocol hdr bitmap */
745         if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
746                              BNXT_ULP_HDR_BIT_O_ETH) ||
747             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
748                              BNXT_ULP_HDR_BIT_O_IPV4) ||
749             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
750                              BNXT_ULP_HDR_BIT_O_IPV6) ||
751             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
752                              BNXT_ULP_HDR_BIT_O_UDP) ||
753             ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
754                              BNXT_ULP_HDR_BIT_O_TCP)) {
755                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
756                 inner_flag = 1;
757         } else {
758                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
759                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
760                                     dmac_idx);
761         }
762         /* Update the field protocol hdr bitmap */
763         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
764
765         return BNXT_TF_RC_SUCCESS;
766 }
767
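/*
 * Example pattern item this handler consumes (illustrative values; a
 * unicast MAC, since broadcast/multicast destinations are rejected above):
 *
 *   struct rte_flow_item_eth spec = {
 *           .dst.addr_bytes = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *           .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *   };
 *   struct rte_flow_item_eth mask = {
 *           .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *           .type = RTE_BE16(0xffff),
 *   };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ETH,
 *           .spec = &spec,
 *           .mask = &mask,
 *   };
 */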
768 /* Function to handle the parsing of RTE Flow item Vlan Header. */
769 int32_t
770 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
771                          struct ulp_rte_parser_params *params)
772 {
773         const struct rte_flow_item_vlan *vlan_spec = item->spec;
774         const struct rte_flow_item_vlan *vlan_mask = item->mask;
775         struct ulp_rte_hdr_bitmap       *hdr_bit;
776         uint32_t idx = 0;
777         uint16_t vlan_tag = 0, priority = 0;
778         uint16_t vlan_tag_mask = 0, priority_mask = 0;
779         uint32_t outer_vtag_num;
780         uint32_t inner_vtag_num;
781         uint16_t eth_type = 0;
782         uint32_t inner_flag = 0;
783         uint32_t size;
784
785         if (vlan_spec) {
786                 vlan_tag = ntohs(vlan_spec->tci);
787                 priority = htons(vlan_tag >> ULP_VLAN_PRIORITY_SHIFT);
788                 vlan_tag &= ULP_VLAN_TAG_MASK;
789                 vlan_tag = htons(vlan_tag);
790                 eth_type = vlan_spec->inner_type;
791         }
792
793         if (vlan_mask) {
794                 vlan_tag_mask = ntohs(vlan_mask->tci);
795                 priority_mask = htons(vlan_tag_mask >> ULP_VLAN_PRIORITY_SHIFT);
796                 vlan_tag_mask &= 0xfff;
797
798                 /*
799                  * The storage for the priority and vlan tag is 2 bytes.
800                  * If the 3-bit priority mask is all ones, extend it so
801                  * the remaining 13 bits are ones as well, making the
802                  * field an exact match.
803                  */
804                 if (priority_mask == ULP_VLAN_PRIORITY_MASK)
805                         priority_mask |= ~ULP_VLAN_PRIORITY_MASK;
806                 if (vlan_tag_mask == ULP_VLAN_TAG_MASK)
807                         vlan_tag_mask |= ~ULP_VLAN_TAG_MASK;
808                 vlan_tag_mask = htons(vlan_tag_mask);
809         }
810
811         if (ulp_rte_prsr_fld_size_validate(params, &idx,
812                                            BNXT_ULP_PROTO_HDR_S_VLAN_NUM)) {
813                 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
814                 return BNXT_TF_RC_ERROR;
815         }
816
817         /*
818          * Copy the rte_flow_item for vlan into hdr_field using Vlan
819          * header fields
820          */
821         size = sizeof(((struct rte_flow_item_vlan *)NULL)->tci);
822         /*
823          * The priority field is ignored because OVS sets it as a
824          * wildcard match, which is not supported. This is a
825          * workaround and shall be addressed in the future.
826          */
827         ulp_rte_prsr_fld_mask(params, &idx, size,
828                               &priority,
829                               (vlan_mask) ? &priority_mask : NULL,
830                               ULP_PRSR_ACT_MASK_IGNORE);
831
832         ulp_rte_prsr_fld_mask(params, &idx, size,
833                               &vlan_tag,
834                               (vlan_mask) ? &vlan_tag_mask : NULL,
835                               ULP_PRSR_ACT_DEFAULT);
836
837         size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
838         ulp_rte_prsr_fld_mask(params, &idx, size,
839                               ulp_deference_struct(vlan_spec, inner_type),
840                               ulp_deference_struct(vlan_mask, inner_type),
841                               ULP_PRSR_ACT_MATCH_IGNORE);
842
843         /* Get the outer tag and inner tag counts */
844         outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
845                                              BNXT_ULP_CF_IDX_O_VTAG_NUM);
846         inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
847                                              BNXT_ULP_CF_IDX_I_VTAG_NUM);
848
849         /* Update the hdr_bitmap of the vlans */
850         hdr_bit = &params->hdr_bitmap;
851         if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
852             !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
853             !outer_vtag_num) {
854                 /* Update the vlan tag num */
855                 outer_vtag_num++;
856                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
857                                     outer_vtag_num);
858                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
859                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
860                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
861                                BNXT_ULP_HDR_BIT_OO_VLAN);
862                 if (vlan_mask && vlan_tag_mask)
863                         ULP_COMP_FLD_IDX_WR(params,
864                                             BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);
865
866         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
867                    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
868                    outer_vtag_num == 1) {
869                 /* update the vlan tag num */
870                 outer_vtag_num++;
871                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
872                                     outer_vtag_num);
873                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
874                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
875                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
876                                BNXT_ULP_HDR_BIT_OI_VLAN);
877                 if (vlan_mask && vlan_tag_mask)
878                         ULP_COMP_FLD_IDX_WR(params,
879                                             BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);
880
881         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
882                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
883                    !inner_vtag_num) {
884                 /* update the vlan tag num */
885                 inner_vtag_num++;
886                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
887                                     inner_vtag_num);
888                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
889                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
890                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
891                                BNXT_ULP_HDR_BIT_IO_VLAN);
892                 if (vlan_mask && vlan_tag_mask)
893                         ULP_COMP_FLD_IDX_WR(params,
894                                             BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
895                 inner_flag = 1;
896         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
897                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
898                    inner_vtag_num == 1) {
899                 /* update the vlan tag num */
900                 inner_vtag_num++;
901                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
902                                     inner_vtag_num);
903                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
904                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
905                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
906                                BNXT_ULP_HDR_BIT_II_VLAN);
907                 if (vlan_mask && vlan_tag_mask)
908                         ULP_COMP_FLD_IDX_WR(params,
909                                             BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
910                 inner_flag = 1;
911         } else {
912                 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
913                 return BNXT_TF_RC_ERROR;
914         }
915         /* Update the field protocol hdr bitmap */
916         ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
917         return BNXT_TF_RC_SUCCESS;
918 }
919
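/*
 * Worked example for the TCI handling above (illustrative): a vlan item
 * with spec tci = RTE_BE16(0xA00A) and mask tci = RTE_BE16(0x0fff) yields
 *
 *   priority = 0x5 with priority_mask = 0, so the PCP is not matched;
 *   vlan_tag = 0x00A with vlan_tag_mask = 0xfff, widened to 0xffff so the
 *              2-byte hdr_field holds an exact-match VID;
 *
 * and, for the first tag after an outer ethernet header, the handler sets
 * BNXT_ULP_HDR_BIT_OO_VLAN and BNXT_ULP_CF_IDX_O_ONE_VTAG.
 */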
920 /* Function to handle the update of proto header based on field values */
921 static void
922 ulp_rte_l3_proto_type_update(struct ulp_rte_parser_params *param,
923                              uint8_t proto, uint32_t in_flag)
924 {
925         if (proto == IPPROTO_UDP) {
926                 if (in_flag) {
927                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
928                                        BNXT_ULP_HDR_BIT_I_UDP);
929                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
930                 } else {
931                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
932                                        BNXT_ULP_HDR_BIT_O_UDP);
933                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
934                 }
935         } else if (proto == IPPROTO_TCP) {
936                 if (in_flag) {
937                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
938                                        BNXT_ULP_HDR_BIT_I_TCP);
939                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_I_L4, 1);
940                 } else {
941                         ULP_BITMAP_SET(param->hdr_fp_bit.bits,
942                                        BNXT_ULP_HDR_BIT_O_TCP);
943                         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_O_L4, 1);
944                 }
945         } else if (proto == IPPROTO_GRE) {
946                 ULP_BITMAP_SET(param->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_GRE);
947         } else if (proto == IPPROTO_ICMP) {
948                 if (ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_L3_TUN))
949                         ULP_BITMAP_SET(param->hdr_bitmap.bits,
950                                        BNXT_ULP_HDR_BIT_I_ICMP);
951                 else
952                         ULP_BITMAP_SET(param->hdr_bitmap.bits,
953                                        BNXT_ULP_HDR_BIT_O_ICMP);
954         }
955         if (proto) {
956                 if (in_flag) {
957                         ULP_COMP_FLD_IDX_WR(param,
958                                             BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
959                                             1);
960                         ULP_COMP_FLD_IDX_WR(param,
961                                             BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
962                                             proto);
963                 } else {
964                         ULP_COMP_FLD_IDX_WR(param,
965                                             BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
966                                             1);
967                         ULP_COMP_FLD_IDX_WR(param,
968                                             BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
969                                             proto);
970                 }
971         }
972 }
973
974 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
975 int32_t
976 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
977                          struct ulp_rte_parser_params *params)
978 {
979         const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
980         const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
981         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
982         uint32_t idx = 0, dip_idx = 0;
983         uint32_t size;
984         uint8_t proto = 0;
985         uint32_t inner_flag = 0;
986         uint32_t cnt;
987
988         /* validate that there is no third L3 header */
989         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
990         if (cnt == 2) {
991                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
992                 return BNXT_TF_RC_ERROR;
993         }
994
995         if (ulp_rte_prsr_fld_size_validate(params, &idx,
996                                            BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
997                 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
998                 return BNXT_TF_RC_ERROR;
999         }
1000
1001         /*
1002          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
1003          * header fields
1004          */
1005         size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.version_ihl);
1006         ulp_rte_prsr_fld_mask(params, &idx, size,
1007                               ulp_deference_struct(ipv4_spec, hdr.version_ihl),
1008                               ulp_deference_struct(ipv4_mask, hdr.version_ihl),
1009                               ULP_PRSR_ACT_DEFAULT);
1010
1011         /*
1012          * The tos field is ignored because OVS sets it as a wildcard
1013          * match, which is not supported. This is a workaround and
1014          * shall be addressed in the future.
1015          */
1016         size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.type_of_service);
1017         ulp_rte_prsr_fld_mask(params, &idx, size,
1018                               ulp_deference_struct(ipv4_spec,
1019                                                    hdr.type_of_service),
1020                               ulp_deference_struct(ipv4_mask,
1021                                                    hdr.type_of_service),
1022                               ULP_PRSR_ACT_MASK_IGNORE);
1023
1024         size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.total_length);
1025         ulp_rte_prsr_fld_mask(params, &idx, size,
1026                               ulp_deference_struct(ipv4_spec, hdr.total_length),
1027                               ulp_deference_struct(ipv4_mask, hdr.total_length),
1028                               ULP_PRSR_ACT_DEFAULT);
1029
1030         size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.packet_id);
1031         ulp_rte_prsr_fld_mask(params, &idx, size,
1032                               ulp_deference_struct(ipv4_spec, hdr.packet_id),
1033                               ulp_deference_struct(ipv4_mask, hdr.packet_id),
1034                               ULP_PRSR_ACT_DEFAULT);
1035
1036         size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.fragment_offset);
1037         ulp_rte_prsr_fld_mask(params, &idx, size,
1038                               ulp_deference_struct(ipv4_spec,
1039                                                    hdr.fragment_offset),
1040                               ulp_deference_struct(ipv4_mask,
1041                                                    hdr.fragment_offset),
1042                               ULP_PRSR_ACT_DEFAULT);
1043
1044         size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.time_to_live);
1045         ulp_rte_prsr_fld_mask(params, &idx, size,
1046                               ulp_deference_struct(ipv4_spec, hdr.time_to_live),
1047                               ulp_deference_struct(ipv4_mask, hdr.time_to_live),
1048                               ULP_PRSR_ACT_DEFAULT);
1049
1050         /* Ignore proto for matching templates */
1051         size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.next_proto_id);
1052         ulp_rte_prsr_fld_mask(params, &idx, size,
1053                               ulp_deference_struct(ipv4_spec,
1054                                                    hdr.next_proto_id),
1055                               ulp_deference_struct(ipv4_mask,
1056                                                    hdr.next_proto_id),
1057                               ULP_PRSR_ACT_MATCH_IGNORE);
1058         if (ipv4_spec)
1059                 proto = ipv4_spec->hdr.next_proto_id;
1060
1061         size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.hdr_checksum);
1062         ulp_rte_prsr_fld_mask(params, &idx, size,
1063                               ulp_deference_struct(ipv4_spec, hdr.hdr_checksum),
1064                               ulp_deference_struct(ipv4_mask, hdr.hdr_checksum),
1065                               ULP_PRSR_ACT_DEFAULT);
1066
1067         size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.src_addr);
1068         ulp_rte_prsr_fld_mask(params, &idx, size,
1069                               ulp_deference_struct(ipv4_spec, hdr.src_addr),
1070                               ulp_deference_struct(ipv4_mask, hdr.src_addr),
1071                               ULP_PRSR_ACT_DEFAULT);
1072
1073         dip_idx = idx;
1074         size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
1075         ulp_rte_prsr_fld_mask(params, &idx, size,
1076                               ulp_deference_struct(ipv4_spec, hdr.dst_addr),
1077                               ulp_deference_struct(ipv4_mask, hdr.dst_addr),
1078                               ULP_PRSR_ACT_DEFAULT);
1079
1080         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
1081         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1082             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1083                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
1084                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1085                 inner_flag = 1;
1086         } else {
1087                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
1088                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1089                 /* Update the tunnel offload dest ip offset */
1090                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1091                                     dip_idx);
1092         }
1093
1094         /* Some of the PMD applications may set the protocol field
1095          * in the IPv4 spec but don't set the mask. So, consider
1096          * the mask in the proto value calculation.
1097          */
1098         if (ipv4_mask)
1099                 proto &= ipv4_mask->hdr.next_proto_id;
1100
1101         /* Update the field protocol hdr bitmap */
1102         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1103         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1104         return BNXT_TF_RC_SUCCESS;
1105 }
1106
1107 /* Function to handle the parsing of RTE Flow item IPV6 Header */
1108 int32_t
1109 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
1110                          struct ulp_rte_parser_params *params)
1111 {
1112         const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
1113         const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
1114         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1115         uint32_t idx = 0, dip_idx = 0;
1116         uint32_t size;
1117         uint32_t ver_spec = 0, ver_mask = 0;
1118         uint32_t tc_spec = 0, tc_mask = 0;
1119         uint32_t lab_spec = 0, lab_mask = 0;
1120         uint8_t proto = 0;
1121         uint32_t inner_flag = 0;
1122         uint32_t cnt;
1123
1124         /* validate that there is no third L3 header */
1125         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_HDR_CNT);
1126         if (cnt == 2) {
1127                 BNXT_TF_DBG(ERR, "Parse Err:Third L3 header not supported\n");
1128                 return BNXT_TF_RC_ERROR;
1129         }
1130
1131         if (ulp_rte_prsr_fld_size_validate(params, &idx,
1132                                            BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
1133                 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1134                 return BNXT_TF_RC_ERROR;
1135         }
1136
1137         /*
1138          * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
1139          * header fields
1140          */
1141         if (ipv6_spec) {
1142                 ver_spec = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
1143                 tc_spec = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
1144                 lab_spec = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
1145                 proto = ipv6_spec->hdr.proto;
1146         }
1147
1148         if (ipv6_mask) {
1149                 ver_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
1150                 tc_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
1151                 lab_mask = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
1152
1153                 /* Some of the PMD applications may set the protocol field
1154                  * in the IPv6 spec but don't set the mask. So, consider
1155                  * the mask in proto value calculation.
1156                  */
1157                 proto &= ipv6_mask->hdr.proto;
1158         }
1159
1160         size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.vtc_flow);
1161         ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
1162                               ULP_PRSR_ACT_DEFAULT);
1163         /*
1164          * The TC and flow label fields are ignored since OVS sets
1165          * them for matching and that is not supported.
1166          * This is a workaround and shall be addressed
1167          * in the future.
1168          */
1169         ulp_rte_prsr_fld_mask(params, &idx, size, &tc_spec, &tc_mask,
1170                               ULP_PRSR_ACT_MASK_IGNORE);
1171         ulp_rte_prsr_fld_mask(params, &idx, size, &lab_spec, &lab_mask,
1172                               ULP_PRSR_ACT_MASK_IGNORE);
1173
1174         size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.payload_len);
1175         ulp_rte_prsr_fld_mask(params, &idx, size,
1176                               ulp_deference_struct(ipv6_spec, hdr.payload_len),
1177                               ulp_deference_struct(ipv6_mask, hdr.payload_len),
1178                               ULP_PRSR_ACT_DEFAULT);
1179
1180         /* Ignore proto for template matching */
1181         size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.proto);
1182         ulp_rte_prsr_fld_mask(params, &idx, size,
1183                               ulp_deference_struct(ipv6_spec, hdr.proto),
1184                               ulp_deference_struct(ipv6_mask, hdr.proto),
1185                               ULP_PRSR_ACT_MATCH_IGNORE);
1186
1187         size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.hop_limits);
1188         ulp_rte_prsr_fld_mask(params, &idx, size,
1189                               ulp_deference_struct(ipv6_spec, hdr.hop_limits),
1190                               ulp_deference_struct(ipv6_mask, hdr.hop_limits),
1191                               ULP_PRSR_ACT_DEFAULT);
1192
1193         size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.src_addr);
1194         ulp_rte_prsr_fld_mask(params, &idx, size,
1195                               ulp_deference_struct(ipv6_spec, hdr.src_addr),
1196                               ulp_deference_struct(ipv6_mask, hdr.src_addr),
1197                               ULP_PRSR_ACT_DEFAULT);
1198
1199         dip_idx = idx;
1200         size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
1201         ulp_rte_prsr_fld_mask(params, &idx, size,
1202                               ulp_deference_struct(ipv6_spec, hdr.dst_addr),
1203                               ulp_deference_struct(ipv6_mask, hdr.dst_addr),
1204                               ULP_PRSR_ACT_DEFAULT);
1205
1206         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
1207         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
1208             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
1209                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
1210                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
1211                 inner_flag = 1;
1212         } else {
1213                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
1214                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
1215                 /* Update the tunnel offload dest ip offset */
1216                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
1217                                     dip_idx);
1218         }
1219
1220         /* Update the field protocol hdr bitmap */
1221         ulp_rte_l3_proto_type_update(params, proto, inner_flag);
1222         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
1223
1224         return BNXT_TF_RC_SUCCESS;
1225 }
1226
1227 /* Function to handle the update of proto header based on field values */
1228 static void
1229 ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
1230                              uint16_t dst_port)
1231 {
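        /* The IANA-assigned VXLAN UDP port (4789) identifies a VXLAN tunnel */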
1232         if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
1233                 ULP_BITMAP_SET(param->hdr_fp_bit.bits,
1234                                BNXT_ULP_HDR_BIT_T_VXLAN);
1235                 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1236         }
1237
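        /* A VXLAN or GRE header already in the bitmap also marks the flow
         * as tunneled.
         */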
1238         if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1239                              BNXT_ULP_HDR_BIT_T_VXLAN) ||
1240             ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
1241                              BNXT_ULP_HDR_BIT_T_GRE))
1242                 ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
1243 }
1244
1245 /* Function to handle the parsing of RTE Flow item UDP Header. */
1246 int32_t
1247 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
1248                         struct ulp_rte_parser_params *params)
1249 {
1250         const struct rte_flow_item_udp *udp_spec = item->spec;
1251         const struct rte_flow_item_udp *udp_mask = item->mask;
1252         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1253         uint32_t idx = 0;
1254         uint32_t size;
1255         uint16_t dport = 0, sport = 0;
1256         uint32_t cnt;
1257
1258         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1259         if (cnt == 2) {
1260                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1261                 return BNXT_TF_RC_ERROR;
1262         }
1263
1264         if (udp_spec) {
1265                 sport = udp_spec->hdr.src_port;
1266                 dport = udp_spec->hdr.dst_port;
1267         }
1268
1269         if (ulp_rte_prsr_fld_size_validate(params, &idx,
1270                                            BNXT_ULP_PROTO_HDR_UDP_NUM)) {
1271                 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1272                 return BNXT_TF_RC_ERROR;
1273         }
1274
1275         /*
1276          * Copy the rte_flow_item for udp into hdr_field using udp
1277          * header fields
1278          */
1279         size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.src_port);
1280         ulp_rte_prsr_fld_mask(params, &idx, size,
1281                               ulp_deference_struct(udp_spec, hdr.src_port),
1282                               ulp_deference_struct(udp_mask, hdr.src_port),
1283                               ULP_PRSR_ACT_DEFAULT);
1284
1285         size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dst_port);
1286         ulp_rte_prsr_fld_mask(params, &idx, size,
1287                               ulp_deference_struct(udp_spec, hdr.dst_port),
1288                               ulp_deference_struct(udp_mask, hdr.dst_port),
1289                               ULP_PRSR_ACT_DEFAULT);
1290
1291         size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_len);
1292         ulp_rte_prsr_fld_mask(params, &idx, size,
1293                               ulp_deference_struct(udp_spec, hdr.dgram_len),
1294                               ulp_deference_struct(udp_mask, hdr.dgram_len),
1295                               ULP_PRSR_ACT_DEFAULT);
1296
1297         size = sizeof(((struct rte_flow_item_udp *)NULL)->hdr.dgram_cksum);
1298         ulp_rte_prsr_fld_mask(params, &idx, size,
1299                               ulp_deference_struct(udp_spec, hdr.dgram_cksum),
1300                               ulp_deference_struct(udp_mask, hdr.dgram_cksum),
1301                               ULP_PRSR_ACT_DEFAULT);
1302
1303         /* Set the udp header bitmap and computed l4 header bitmaps */
1304         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1305             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1306                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
1307                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1308                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1309                                     (uint32_t)rte_be_to_cpu_16(sport));
1310                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1311                                     (uint32_t)rte_be_to_cpu_16(dport));
1312                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1313                                     1);
1314                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1315                                     IPPROTO_UDP);
1316                 if (udp_mask && udp_mask->hdr.src_port)
1317                         ULP_COMP_FLD_IDX_WR(params,
1318                                             BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1319                                             1);
1320                 if (udp_mask && udp_mask->hdr.dst_port)
1321                         ULP_COMP_FLD_IDX_WR(params,
1322                                             BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1323                                             1);
1324         } else {
1325                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
1326                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1327                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1328                                     (uint32_t)rte_be_to_cpu_16(sport));
1329                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1330                                     (uint32_t)rte_be_to_cpu_16(dport));
1331                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1332                                     1);
1333                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1334                                     IPPROTO_UDP);
1335                 if (udp_mask && udp_mask->hdr.src_port)
1336                         ULP_COMP_FLD_IDX_WR(params,
1337                                             BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1338                                             1);
1339                 if (udp_mask && udp_mask->hdr.dst_port)
1340                         ULP_COMP_FLD_IDX_WR(params,
1341                                             BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1342                                             1);
1343
1344                 /* Update the field protocol hdr bitmap */
1345                 ulp_rte_l4_proto_type_update(params, dport);
1346         }
1347         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1348         return BNXT_TF_RC_SUCCESS;
1349 }
1350
1351 /* Function to handle the parsing of RTE Flow item TCP Header. */
1352 int32_t
1353 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
1354                         struct ulp_rte_parser_params *params)
1355 {
1356         const struct rte_flow_item_tcp *tcp_spec = item->spec;
1357         const struct rte_flow_item_tcp *tcp_mask = item->mask;
1358         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1359         uint32_t idx = 0;
1360         uint16_t dport = 0, sport = 0;
1361         uint32_t size;
1362         uint32_t cnt;
1363
1364         cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
1365         if (cnt == 2) {
1366                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
1367                 return BNXT_TF_RC_ERROR;
1368         }
1369
1370         if (tcp_spec) {
1371                 sport = tcp_spec->hdr.src_port;
1372                 dport = tcp_spec->hdr.dst_port;
1373         }
1374
1375         if (ulp_rte_prsr_fld_size_validate(params, &idx,
1376                                            BNXT_ULP_PROTO_HDR_TCP_NUM)) {
1377                 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1378                 return BNXT_TF_RC_ERROR;
1379         }
1380
1381         /*
1382          * Copy the rte_flow_item for tcp into hdr_field using tcp
1383          * header fields
1384          */
1385         size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.src_port);
1386         ulp_rte_prsr_fld_mask(params, &idx, size,
1387                               ulp_deference_struct(tcp_spec, hdr.src_port),
1388                               ulp_deference_struct(tcp_mask, hdr.src_port),
1389                               ULP_PRSR_ACT_DEFAULT);
1390
1391         size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.dst_port);
1392         ulp_rte_prsr_fld_mask(params, &idx, size,
1393                               ulp_deference_struct(tcp_spec, hdr.dst_port),
1394                               ulp_deference_struct(tcp_mask, hdr.dst_port),
1395                               ULP_PRSR_ACT_DEFAULT);
1396
1397         size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.sent_seq);
1398         ulp_rte_prsr_fld_mask(params, &idx, size,
1399                               ulp_deference_struct(tcp_spec, hdr.sent_seq),
1400                               ulp_deference_struct(tcp_mask, hdr.sent_seq),
1401                               ULP_PRSR_ACT_DEFAULT);
1402
1403         size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.recv_ack);
1404         ulp_rte_prsr_fld_mask(params, &idx, size,
1405                               ulp_deference_struct(tcp_spec, hdr.recv_ack),
1406                               ulp_deference_struct(tcp_mask, hdr.recv_ack),
1407                               ULP_PRSR_ACT_DEFAULT);
1408
1409         size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.data_off);
1410         ulp_rte_prsr_fld_mask(params, &idx, size,
1411                               ulp_deference_struct(tcp_spec, hdr.data_off),
1412                               ulp_deference_struct(tcp_mask, hdr.data_off),
1413                               ULP_PRSR_ACT_DEFAULT);
1414
1415         size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_flags);
1416         ulp_rte_prsr_fld_mask(params, &idx, size,
1417                               ulp_deference_struct(tcp_spec, hdr.tcp_flags),
1418                               ulp_deference_struct(tcp_mask, hdr.tcp_flags),
1419                               ULP_PRSR_ACT_DEFAULT);
1420
1421         size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.rx_win);
1422         ulp_rte_prsr_fld_mask(params, &idx, size,
1423                               ulp_deference_struct(tcp_spec, hdr.rx_win),
1424                               ulp_deference_struct(tcp_mask, hdr.rx_win),
1425                               ULP_PRSR_ACT_DEFAULT);
1426
1427         size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.cksum);
1428         ulp_rte_prsr_fld_mask(params, &idx, size,
1429                               ulp_deference_struct(tcp_spec, hdr.cksum),
1430                               ulp_deference_struct(tcp_mask, hdr.cksum),
1431                               ULP_PRSR_ACT_DEFAULT);
1432
1433         size = sizeof(((struct rte_flow_item_tcp *)NULL)->hdr.tcp_urp);
1434         ulp_rte_prsr_fld_mask(params, &idx, size,
1435                               ulp_deference_struct(tcp_spec, hdr.tcp_urp),
1436                               ulp_deference_struct(tcp_mask, hdr.tcp_urp),
1437                               ULP_PRSR_ACT_DEFAULT);
1438
1439         /* Set the tcp header bitmap and computed l4 header bitmaps */
1440         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1441             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1442                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1443                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1444                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
1445                                     (uint32_t)rte_be_to_cpu_16(sport));
1446                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
1447                                     (uint32_t)rte_be_to_cpu_16(dport));
1448                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
1449                                     1);
1450                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
1451                                     IPPROTO_TCP);
1452                 if (tcp_mask && tcp_mask->hdr.src_port)
1453                         ULP_COMP_FLD_IDX_WR(params,
1454                                             BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
1455                                             1);
1456                 if (tcp_mask && tcp_mask->hdr.dst_port)
1457                         ULP_COMP_FLD_IDX_WR(params,
1458                                             BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
1459                                             1);
1460         } else {
1461                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1462                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1463                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
1464                                     (uint32_t)rte_be_to_cpu_16(sport));
1465                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
1466                                     (uint32_t)rte_be_to_cpu_16(dport));
1467                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
1468                                     1);
1469                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
1470                                     IPPROTO_TCP);
1471                 if (tcp_mask && tcp_mask->hdr.src_port)
1472                         ULP_COMP_FLD_IDX_WR(params,
1473                                             BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
1474                                             1);
1475                 if (tcp_mask && tcp_mask->hdr.dst_port)
1476                         ULP_COMP_FLD_IDX_WR(params,
1477                                             BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
1478                                             1);
1479         }
1480         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
1481         return BNXT_TF_RC_SUCCESS;
1482 }
1483
1484 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1485 int32_t
1486 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1487                           struct ulp_rte_parser_params *params)
1488 {
1489         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1490         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1491         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1492         uint32_t idx = 0;
1493         uint32_t size;
1494
1495         if (ulp_rte_prsr_fld_size_validate(params, &idx,
1496                                            BNXT_ULP_PROTO_HDR_VXLAN_NUM)) {
1497                 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1498                 return BNXT_TF_RC_ERROR;
1499         }
1500
1501         /*
1502          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1503          * header fields
1504          */
1505         size = sizeof(((struct rte_flow_item_vxlan *)NULL)->flags);
1506         ulp_rte_prsr_fld_mask(params, &idx, size,
1507                               ulp_deference_struct(vxlan_spec, flags),
1508                               ulp_deference_struct(vxlan_mask, flags),
1509                               ULP_PRSR_ACT_DEFAULT);
1510
1511         size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd0);
1512         ulp_rte_prsr_fld_mask(params, &idx, size,
1513                               ulp_deference_struct(vxlan_spec, rsvd0),
1514                               ulp_deference_struct(vxlan_mask, rsvd0),
1515                               ULP_PRSR_ACT_DEFAULT);
1516
1517         size = sizeof(((struct rte_flow_item_vxlan *)NULL)->vni);
1518         ulp_rte_prsr_fld_mask(params, &idx, size,
1519                               ulp_deference_struct(vxlan_spec, vni),
1520                               ulp_deference_struct(vxlan_mask, vni),
1521                               ULP_PRSR_ACT_DEFAULT);
1522
1523         size = sizeof(((struct rte_flow_item_vxlan *)NULL)->rsvd1);
1524         ulp_rte_prsr_fld_mask(params, &idx, size,
1525                               ulp_deference_struct(vxlan_spec, rsvd1),
1526                               ulp_deference_struct(vxlan_mask, rsvd1),
1527                               ULP_PRSR_ACT_DEFAULT);
1528
1529         /* Update the hdr_bitmap with vxlan */
1530         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1531         ulp_rte_l4_proto_type_update(params, 0);
1532         return BNXT_TF_RC_SUCCESS;
1533 }
1534
1535 /* Function to handle the parsing of RTE Flow item GRE Header. */
1536 int32_t
1537 ulp_rte_gre_hdr_handler(const struct rte_flow_item *item,
1538                         struct ulp_rte_parser_params *params)
1539 {
1540         const struct rte_flow_item_gre *gre_spec = item->spec;
1541         const struct rte_flow_item_gre *gre_mask = item->mask;
1542         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1543         uint32_t idx = 0;
1544         uint32_t size;
1545
1546         if (ulp_rte_prsr_fld_size_validate(params, &idx,
1547                                            BNXT_ULP_PROTO_HDR_GRE_NUM)) {
1548                 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1549                 return BNXT_TF_RC_ERROR;
1550         }
1551
1552         size = sizeof(((struct rte_flow_item_gre *)NULL)->c_rsvd0_ver);
1553         ulp_rte_prsr_fld_mask(params, &idx, size,
1554                               ulp_deference_struct(gre_spec, c_rsvd0_ver),
1555                               ulp_deference_struct(gre_mask, c_rsvd0_ver),
1556                               ULP_PRSR_ACT_DEFAULT);
1557
1558         size = sizeof(((struct rte_flow_item_gre *)NULL)->protocol);
1559         ulp_rte_prsr_fld_mask(params, &idx, size,
1560                               ulp_deference_struct(gre_spec, protocol),
1561                               ulp_deference_struct(gre_mask, protocol),
1562                               ULP_PRSR_ACT_DEFAULT);
1563
1564         /* Update the hdr_bitmap with GRE */
1565         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
1566         ulp_rte_l4_proto_type_update(params, 0);
1567         return BNXT_TF_RC_SUCCESS;
1568 }
1569
1570 /* Function to handle the parsing of RTE Flow item ANY. */
1571 int32_t
1572 ulp_rte_item_any_handler(const struct rte_flow_item *item __rte_unused,
1573                          struct ulp_rte_parser_params *params __rte_unused)
1574 {
1575         return BNXT_TF_RC_SUCCESS;
1576 }
1577
1578 /* Function to handle the parsing of RTE Flow item ICMP Header. */
1579 int32_t
1580 ulp_rte_icmp_hdr_handler(const struct rte_flow_item *item,
1581                          struct ulp_rte_parser_params *params)
1582 {
1583         const struct rte_flow_item_icmp *icmp_spec = item->spec;
1584         const struct rte_flow_item_icmp *icmp_mask = item->mask;
1585         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1586         uint32_t idx = 0;
1587         uint32_t size;
1588
1589         if (ulp_rte_prsr_fld_size_validate(params, &idx,
1590                                            BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1591                 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1592                 return BNXT_TF_RC_ERROR;
1593         }
1594
1595         size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_type);
1596         ulp_rte_prsr_fld_mask(params, &idx, size,
1597                               ulp_deference_struct(icmp_spec, hdr.icmp_type),
1598                               ulp_deference_struct(icmp_mask, hdr.icmp_type),
1599                               ULP_PRSR_ACT_DEFAULT);
1600
1601         size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_code);
1602         ulp_rte_prsr_fld_mask(params, &idx, size,
1603                               ulp_deference_struct(icmp_spec, hdr.icmp_code),
1604                               ulp_deference_struct(icmp_mask, hdr.icmp_code),
1605                               ULP_PRSR_ACT_DEFAULT);
1606
1607         size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_cksum);
1608         ulp_rte_prsr_fld_mask(params, &idx, size,
1609                               ulp_deference_struct(icmp_spec, hdr.icmp_cksum),
1610                               ulp_deference_struct(icmp_mask, hdr.icmp_cksum),
1611                               ULP_PRSR_ACT_DEFAULT);
1612
1613         size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_ident);
1614         ulp_rte_prsr_fld_mask(params, &idx, size,
1615                               ulp_deference_struct(icmp_spec, hdr.icmp_ident),
1616                               ulp_deference_struct(icmp_mask, hdr.icmp_ident),
1617                               ULP_PRSR_ACT_DEFAULT);
1618
1619         size = sizeof(((struct rte_flow_item_icmp *)NULL)->hdr.icmp_seq_nb);
1620         ulp_rte_prsr_fld_mask(params, &idx, size,
1621                               ulp_deference_struct(icmp_spec, hdr.icmp_seq_nb),
1622                               ulp_deference_struct(icmp_mask, hdr.icmp_seq_nb),
1623                               ULP_PRSR_ACT_DEFAULT);
1624
1625         /* Update the hdr_bitmap with ICMP */
1626         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1627                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1628         else
1629                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1630         return BNXT_TF_RC_SUCCESS;
1631 }
1632
1633 /* Function to handle the parsing of RTE Flow item ICMP6 Header. */
1634 int32_t
1635 ulp_rte_icmp6_hdr_handler(const struct rte_flow_item *item,
1636                           struct ulp_rte_parser_params *params)
1637 {
1638         const struct rte_flow_item_icmp6 *icmp_spec = item->spec;
1639         const struct rte_flow_item_icmp6 *icmp_mask = item->mask;
1640         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1641         uint32_t idx = 0;
1642         uint32_t size;
1643
1644         if (ulp_rte_prsr_fld_size_validate(params, &idx,
1645                                            BNXT_ULP_PROTO_HDR_ICMP_NUM)) {
1646                 BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
1647                 return BNXT_TF_RC_ERROR;
1648         }
1649
1650         size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->type);
1651         ulp_rte_prsr_fld_mask(params, &idx, size,
1652                               ulp_deference_struct(icmp_spec, type),
1653                               ulp_deference_struct(icmp_mask, type),
1654                               ULP_PRSR_ACT_DEFAULT);
1655
1656         size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->code);
1657         ulp_rte_prsr_fld_mask(params, &idx, size,
1658                               ulp_deference_struct(icmp_spec, code),
1659                               ulp_deference_struct(icmp_mask, code),
1660                               ULP_PRSR_ACT_DEFAULT);
1661
1662         size = sizeof(((struct rte_flow_item_icmp6 *)NULL)->checksum);
1663         ulp_rte_prsr_fld_mask(params, &idx, size,
1664                               ulp_deference_struct(icmp_spec, checksum),
1665                               ulp_deference_struct(icmp_mask, checksum),
1666                               ULP_PRSR_ACT_DEFAULT);
1667
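        /* ICMPv6 is only valid over IPv6; reject it on an outer IPv4 header */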
1668         if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4)) {
1669                 BNXT_TF_DBG(ERR, "Error: incorrect icmp version\n");
1670                 return BNXT_TF_RC_ERROR;
1671         }
1672
1673         /* Update the hdr_bitmap with ICMP */
1674         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN))
1675                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ICMP);
1676         else
1677                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ICMP);
1678         return BNXT_TF_RC_SUCCESS;
1679 }
1680
1681 /* Function to handle the parsing of RTE Flow item void Header */
1682 int32_t
1683 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1684                          struct ulp_rte_parser_params *params __rte_unused)
1685 {
1686         return BNXT_TF_RC_SUCCESS;
1687 }
1688
1689 /* Function to handle the parsing of RTE Flow action void Header. */
1690 int32_t
1691 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1692                          struct ulp_rte_parser_params *params __rte_unused)
1693 {
1694         return BNXT_TF_RC_SUCCESS;
1695 }
1696
1697 /* Function to handle the parsing of RTE Flow action Mark Header. */
1698 int32_t
1699 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1700                          struct ulp_rte_parser_params *param)
1701 {
1702         const struct rte_flow_action_mark *mark;
1703         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1704         uint32_t mark_id;
1705
1706         mark = action_item->conf;
1707         if (mark) {
1708                 mark_id = tfp_cpu_to_be_32(mark->id);
1709                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1710                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1711
1712                 /* Update the act_bitmap with mark */
1713                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_MARK);
1714                 return BNXT_TF_RC_SUCCESS;
1715         }
1716         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1717         return BNXT_TF_RC_ERROR;
1718 }
1719
1720 /* Function to handle the parsing of RTE Flow action RSS Header. */
1721 int32_t
1722 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1723                         struct ulp_rte_parser_params *param)
1724 {
1725         const struct rte_flow_action_rss *rss;
1726         struct ulp_rte_act_prop *ap = &param->act_prop;
1727
1728         if (action_item == NULL || action_item->conf == NULL) {
1729                 BNXT_TF_DBG(ERR, "Parse Err: invalid rss configuration\n");
1730                 return BNXT_TF_RC_ERROR;
1731         }
1732
1733         rss = action_item->conf;
1734         /* Copy the rss into the specific action properties */
1735         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_TYPES], &rss->types,
1736                BNXT_ULP_ACT_PROP_SZ_RSS_TYPES);
1737         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_LEVEL], &rss->level,
1738                BNXT_ULP_ACT_PROP_SZ_RSS_LEVEL);
1739         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY_LEN],
1740                &rss->key_len, BNXT_ULP_ACT_PROP_SZ_RSS_KEY_LEN);
1741
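        /* The RSS key must fit within the fixed-size action property field */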
1742         if (rss->key_len > BNXT_ULP_ACT_PROP_SZ_RSS_KEY) {
1743                 BNXT_TF_DBG(ERR, "Parse Err: RSS key too big\n");
1744                 return BNXT_TF_RC_ERROR;
1745         }
1746         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_RSS_KEY], rss->key,
1747                rss->key_len);
1748
1749         /* set the RSS action header bit */
1750         ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACT_BIT_RSS);
1751
1752         return BNXT_TF_RC_SUCCESS;
1753 }
1754
1755 /* Function to handle the parsing of RTE Flow item eth Header. */
1756 static void
1757 ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
1758                             const struct rte_flow_item_eth *eth_spec)
1759 {
1760         struct ulp_rte_hdr_field *field;
1761         uint32_t size;
1762
1763         field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
1764         size = sizeof(eth_spec->dst.addr_bytes);
1765         field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);
1766
1767         size = sizeof(eth_spec->src.addr_bytes);
1768         field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);
1769
1770         size = sizeof(eth_spec->type);
1771         field = ulp_rte_parser_fld_copy(field, &eth_spec->type, size);
1772
1773         ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
1774 }
1775
1776 /* Function to handle the parsing of RTE Flow item vlan Header. */
1777 static void
1778 ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
1779                              const struct rte_flow_item_vlan *vlan_spec,
1780                              uint32_t inner)
1781 {
1782         struct ulp_rte_hdr_field *field;
1783         uint32_t size;
1784
1785         if (!inner) {
1786                 field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
1787                 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1788                                BNXT_ULP_HDR_BIT_OO_VLAN);
1789         } else {
1790                 field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
1791                 ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
1792                                BNXT_ULP_HDR_BIT_OI_VLAN);
1793         }
1794
1795         size = sizeof(vlan_spec->tci);
1796         field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);
1797
1798         size = sizeof(vlan_spec->inner_type);
1799         field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
1800 }
1801
1802 /* Function to handle the parsing of RTE Flow item ipv4 Header. */
1803 static void
1804 ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
1805                              const struct rte_flow_item_ipv4 *ip)
1806 {
1807         struct ulp_rte_hdr_field *field;
1808         uint32_t size;
1809         uint8_t val8;
1810
1811         field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
1812         size = sizeof(ip->hdr.version_ihl);
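        /* Default to version 4 with the minimum IHL when not specified */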
1813         if (!ip->hdr.version_ihl)
1814                 val8 = RTE_IPV4_VHL_DEF;
1815         else
1816                 val8 = ip->hdr.version_ihl;
1817         field = ulp_rte_parser_fld_copy(field, &val8, size);
1818
1819         size = sizeof(ip->hdr.type_of_service);
1820         field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
1821
1822         size = sizeof(ip->hdr.packet_id);
1823         field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
1824
1825         size = sizeof(ip->hdr.fragment_offset);
1826         field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
1827
1828         size = sizeof(ip->hdr.time_to_live);
1829         if (!ip->hdr.time_to_live)
1830                 val8 = BNXT_ULP_DEFAULT_TTL;
1831         else
1832                 val8 = ip->hdr.time_to_live;
1833         field = ulp_rte_parser_fld_copy(field, &val8, size);
1834
1835         size = sizeof(ip->hdr.next_proto_id);
1836         field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
1837
1838         size = sizeof(ip->hdr.src_addr);
1839         field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1840
1841         size = sizeof(ip->hdr.dst_addr);
1842         field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1843
1844         ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
1845 }
1846
1847 /* Function to handle the parsing of RTE Flow item ipv6 Header. */
1848 static void
1849 ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
1850                              const struct rte_flow_item_ipv6 *ip)
1851 {
1852         struct ulp_rte_hdr_field *field;
1853         uint32_t size;
1854         uint32_t val32;
1855         uint8_t val8;
1856
1857         field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
1858         size = sizeof(ip->hdr.vtc_flow);
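        /* Fall back to the default IPv6 version word when vtc_flow is zero */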
1859         if (!ip->hdr.vtc_flow)
1860                 val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
1861         else
1862                 val32 = ip->hdr.vtc_flow;
1863         field = ulp_rte_parser_fld_copy(field, &val32, size);
1864
1865         size = sizeof(ip->hdr.proto);
1866         field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
1867
1868         size = sizeof(ip->hdr.hop_limits);
1869         if (!ip->hdr.hop_limits)
1870                 val8 = BNXT_ULP_DEFAULT_TTL;
1871         else
1872                 val8 = ip->hdr.hop_limits;
1873         field = ulp_rte_parser_fld_copy(field, &val8, size);
1874
1875         size = sizeof(ip->hdr.src_addr);
1876         field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
1877
1878         size = sizeof(ip->hdr.dst_addr);
1879         field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
1880
1881         ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
1882 }
1883
1884 /* Function to handle the parsing of RTE Flow item UDP Header. */
1885 static void
1886 ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
1887                             const struct rte_flow_item_udp *udp_spec)
1888 {
1889         struct ulp_rte_hdr_field *field;
1890         uint32_t size;
1891         uint8_t type = IPPROTO_UDP;
1892
1893         field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
1894         size = sizeof(udp_spec->hdr.src_port);
1895         field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
1896
1897         size = sizeof(udp_spec->hdr.dst_port);
1898         field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
1899
1900         ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
1901
1902         /* Update the ip header protocol */
1903         field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
1904         ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1905         field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
1906         ulp_rte_parser_fld_copy(field, &type, sizeof(type));
1907 }
1908
1909 /* Function to handle the parsing of RTE Flow item vxlan Header. */
1910 static void
1911 ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
1912                               struct rte_flow_item_vxlan *vxlan_spec)
1913 {
1914         struct ulp_rte_hdr_field *field;
1915         uint32_t size;
1916
1917         field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
1918         size = sizeof(vxlan_spec->flags);
1919         field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);
1920
1921         size = sizeof(vxlan_spec->rsvd0);
1922         field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);
1923
1924         size = sizeof(vxlan_spec->vni);
1925         field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);
1926
1927         size = sizeof(vxlan_spec->rsvd1);
1928         field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);
1929
1930         ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1931 }
1932
1933 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1934 int32_t
1935 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1936                                 struct ulp_rte_parser_params *params)
1937 {
1938         const struct rte_flow_action_vxlan_encap *vxlan_encap;
1939         const struct rte_flow_item *item;
1940         const struct rte_flow_item_ipv4 *ipv4_spec;
1941         const struct rte_flow_item_ipv6 *ipv6_spec;
1942         struct rte_flow_item_vxlan vxlan_spec;
1943         uint32_t vlan_num = 0, vlan_size = 0;
1944         uint32_t ip_size = 0, ip_type = 0;
1945         uint32_t vxlan_size = 0;
1946         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1947         struct ulp_rte_act_prop *ap = &params->act_prop;
1948
1949         vxlan_encap = action_item->conf;
1950         if (!vxlan_encap) {
1951                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1952                 return BNXT_TF_RC_ERROR;
1953         }
1954
1955         item = vxlan_encap->definition;
1956         if (!item) {
1957                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1958                 return BNXT_TF_RC_ERROR;
1959         }
1960
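        /* The encap definition is an ordered item list:
         * ETH -> [VLAN [VLAN]] -> IPV4|IPV6 -> UDP -> VXLAN, with VOID
         * items skipped along the way.
         */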
1961         if (!ulp_rte_item_skip_void(&item, 0))
1962                 return BNXT_TF_RC_ERROR;
1963
1964         /* must have ethernet header */
1965         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1966                 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1967                 return BNXT_TF_RC_ERROR;
1968         }
1969
1970         /* Parse the ethernet header */
1971         if (item->spec)
1972                 ulp_rte_enc_eth_hdr_handler(params, item->spec);
1973
1974         /* Goto the next item */
1975         if (!ulp_rte_item_skip_void(&item, 1))
1976                 return BNXT_TF_RC_ERROR;
1977
1978         /* May have vlan header */
1979         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1980                 vlan_num++;
1981                 if (item->spec)
1982                         ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
1983
1984                 if (!ulp_rte_item_skip_void(&item, 1))
1985                         return BNXT_TF_RC_ERROR;
1986         }
1987
1988         /* may have two vlan headers */
1989         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1990                 vlan_num++;
1991                 if (item->spec)
1992                         ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
1993
1994                 if (!ulp_rte_item_skip_void(&item, 1))
1995                         return BNXT_TF_RC_ERROR;
1996         }
1997
1998         /* Update the vlan count and size if one or more vlans are present */
1999         if (vlan_num) {
2000                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
2001                 vlan_num = tfp_cpu_to_be_32(vlan_num);
2002                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
2003                        &vlan_num,
2004                        sizeof(uint32_t));
2005                 vlan_size = tfp_cpu_to_be_32(vlan_size);
2006                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
2007                        &vlan_size,
2008                        sizeof(uint32_t));
2009         }
2010
2011         /* L3 must be IPv4 or IPv6 */
2012         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
2013                 ipv4_spec = item->spec;
2014                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
2015
2016                 /* Update the ip size details */
2017                 ip_size = tfp_cpu_to_be_32(ip_size);
2018                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2019                        &ip_size, sizeof(uint32_t));
2020
2021                 /* update the ip type */
2022                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
2023                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2024                        &ip_type, sizeof(uint32_t));
2025
2026                 /* update the computed field to indicate an ipv4 header */
2027                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
2028                                     1);
2029                 if (ipv4_spec)
2030                         ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
2031
2032                 if (!ulp_rte_item_skip_void(&item, 1))
2033                         return BNXT_TF_RC_ERROR;
2034         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2035                 ipv6_spec = item->spec;
2036                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
2037
2038                 /* Update the ip size details */
2039                 ip_size = tfp_cpu_to_be_32(ip_size);
2040                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
2041                        &ip_size, sizeof(uint32_t));
2042
2043                 /* update the ip type */
2044                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
2045                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
2046                        &ip_type, sizeof(uint32_t));
2047
2048                 /* update the computed field to indicate an ipv6 header */
2049                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
2050                                     1);
2051                 if (ipv6_spec)
2052                         ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
2053
2054                 if (!ulp_rte_item_skip_void(&item, 1))
2055                         return BNXT_TF_RC_ERROR;
2056         } else {
2057                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
2058                 return BNXT_TF_RC_ERROR;
2059         }
2060
2061         /* L4 is UDP */
2062         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
2063                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
2064                 return BNXT_TF_RC_ERROR;
2065         }
2066         if (item->spec)
2067                 ulp_rte_enc_udp_hdr_handler(params, item->spec);
2068
2069         if (!ulp_rte_item_skip_void(&item, 1))
2070                 return BNXT_TF_RC_ERROR;
2071
2072         /* Finally VXLAN */
2073         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2074                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
2075                 return BNXT_TF_RC_ERROR;
2076         }
2077         vxlan_size = sizeof(struct rte_flow_item_vxlan);
2078         /* copy the vxlan details */
2079         memcpy(&vxlan_spec, item->spec, vxlan_size);
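        /* Force the I flag (0x08) so the VNI is marked as valid */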
2080         vxlan_spec.flags = 0x08;
2081         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
2082         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
2083                &vxlan_size, sizeof(uint32_t));
2084
2085         ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
2086
2087         /* update the act_bitmap with vxlan encap */
2088         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
2089         return BNXT_TF_RC_SUCCESS;
2090 }
2091
2092 /* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
2093 int32_t
2094 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
2095                                 __rte_unused,
2096                                 struct ulp_rte_parser_params *params)
2097 {
2098         /* update the act_bitmap with vxlan decap */
2099         ULP_BITMAP_SET(params->act_bitmap.bits,
2100                        BNXT_ULP_ACT_BIT_VXLAN_DECAP);
2101         /* Update computational field with tunnel decap info */
2102         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
2103         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
2104         return BNXT_TF_RC_SUCCESS;
2105 }
2106
2107 /* Function to handle the parsing of RTE Flow action drop Header. */
2108 int32_t
2109 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
2110                          struct ulp_rte_parser_params *params)
2111 {
2112         /* Update the act_bitmap with drop */
2113         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DROP);
2114         return BNXT_TF_RC_SUCCESS;
2115 }
2116
2117 /* Function to handle the parsing of RTE Flow action count. */
2118 int32_t
2119 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
2120                           struct ulp_rte_parser_params *params)
2121 {
2122         const struct rte_flow_action_count *act_count;
2123         struct ulp_rte_act_prop *act_prop = &params->act_prop;
2124
2125         act_count = action_item->conf;
2126         if (act_count) {
2127                 if (act_count->shared) {
2128                         BNXT_TF_DBG(ERR,
2129                                     "Parse Error:Shared count not supported\n");
2130                         return BNXT_TF_RC_PARSE_ERR;
2131                 }
2132                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
2133                        &act_count->id,
2134                        BNXT_ULP_ACT_PROP_SZ_COUNT);
2135         }
2136
2137         /* Update the act_bitmap with count */
2138         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_COUNT);
2139         return BNXT_TF_RC_SUCCESS;
2140 }
2141
2142 /* Function to handle the parsing of action ports. */
2143 static int32_t
2144 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
2145                             uint32_t ifindex)
2146 {
2147         enum bnxt_ulp_direction_type dir;
2148         uint16_t pid_s;
2149         uint32_t pid;
2150         struct ulp_rte_act_prop *act = &param->act_prop;
2151         enum bnxt_ulp_intf_type port_type;
2152         uint32_t vnic_type;
2153
2154         /* Get the direction */
2155         dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
2156         if (dir == BNXT_ULP_DIR_EGRESS) {
2157                 /* For egress direction, fill vport */
2158                 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
2159                         return BNXT_TF_RC_ERROR;
2160
2161                 pid = pid_s;
2162                 pid = rte_cpu_to_be_32(pid);
2163                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2164                        &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2165         } else {
2166                 /* For ingress direction, fill vnic */
2167                 port_type = ULP_COMP_FLD_IDX_RD(param,
2168                                                 BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
2169                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
2170                         vnic_type = BNXT_ULP_VF_FUNC_VNIC;
2171                 else
2172                         vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
2173
2174                 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
2175                                                  vnic_type, &pid_s))
2176                         return BNXT_TF_RC_ERROR;
2177
2178                 pid = pid_s;
2179                 pid = rte_cpu_to_be_32(pid);
2180                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
2181                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
2182         }
2183
2184         /* Update the action port set bit */
2185         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2186         return BNXT_TF_RC_SUCCESS;
2187 }
2188
2189 /* Function to handle the parsing of RTE Flow action PF. */
2190 int32_t
2191 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
2192                        struct ulp_rte_parser_params *params)
2193 {
2194         uint32_t port_id;
2195         uint32_t ifindex;
2196         enum bnxt_ulp_intf_type intf_type;
2197
2198         /* Get the port id of the current device */
2199         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
2200
2201         /* Get the port db ifindex */
2202         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
2203                                               &ifindex)) {
2204                 BNXT_TF_DBG(ERR, "Invalid port id\n");
2205                 return BNXT_TF_RC_ERROR;
2206         }
2207
2208         /* Check that the port is a PF port */
2209         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2210         if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
2211                 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
2212                 return BNXT_TF_RC_ERROR;
2213         }
2214         /* Update the action properties */
2215         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2216         return ulp_rte_parser_act_port_set(params, ifindex);
2217 }
2218
2219 /* Function to handle the parsing of RTE Flow action VF. */
2220 int32_t
2221 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
2222                        struct ulp_rte_parser_params *params)
2223 {
2224         const struct rte_flow_action_vf *vf_action;
2225         enum bnxt_ulp_intf_type intf_type;
2226         uint32_t ifindex;
2227         struct bnxt *bp;
2228
2229         vf_action = action_item->conf;
2230         if (!vf_action) {
2231                 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
2232                 return BNXT_TF_RC_PARSE_ERR;
2233         }
2234
2235         if (vf_action->original) {
2236                 BNXT_TF_DBG(ERR, "ParseErr:VF Original not supported\n");
2237                 return BNXT_TF_RC_PARSE_ERR;
2238         }
2239
2240         bp = bnxt_pmd_get_bp(params->port_id);
2241         if (bp == NULL) {
2242                 BNXT_TF_DBG(ERR, "Invalid bp\n");
2243                 return BNXT_TF_RC_ERROR;
2244         }
2245
2246         /* vf_action->id is a logical number which in this case is an
2247          * offset from the first VF. So, to get the absolute VF id, the
2248          * offset must be added to the absolute first vf id of that port.
2249          */
2250         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
2251                                                  bp->first_vf_id +
2252                                                  vf_action->id,
2253                                                  &ifindex)) {
2254                 BNXT_TF_DBG(ERR, "VF is not valid interface\n");
2255                 return BNXT_TF_RC_ERROR;
2256         }
2257         /* Check that the port is a VF port */
2258         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
2259         if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
2260             intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
2261                 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
2262                 return BNXT_TF_RC_ERROR;
2263         }
2264
2265         /* Update the action properties */
2266         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2267         return ulp_rte_parser_act_port_set(params, ifindex);
2268 }
2269
2270 /* Function to handle the parsing of RTE Flow action port_id. */
2271 int32_t
2272 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
2273                             struct ulp_rte_parser_params *param)
2274 {
2275         const struct rte_flow_action_port_id *port_id = act_item->conf;
2276         uint32_t ifindex;
2277         enum bnxt_ulp_intf_type intf_type;
2278
2279         if (!port_id) {
2280                 BNXT_TF_DBG(ERR,
2281                             "ParseErr: Invalid Argument\n");
2282                 return BNXT_TF_RC_PARSE_ERR;
2283         }
2284         if (port_id->original) {
2285                 BNXT_TF_DBG(ERR,
2286                             "ParseErr:Portid Original not supported\n");
2287                 return BNXT_TF_RC_PARSE_ERR;
2288         }
2289
2290         /* Get the port db ifindex */
2291         if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
2292                                               &ifindex)) {
2293                 BNXT_TF_DBG(ERR, "Invalid port id\n");
2294                 return BNXT_TF_RC_ERROR;
2295         }
2296
2297         /* Get the intf type */
2298         intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
2299         if (!intf_type) {
2300                 BNXT_TF_DBG(ERR, "Invalid port type\n");
2301                 return BNXT_TF_RC_ERROR;
2302         }
2303
2304         /* Set the action port */
2305         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
2306         return ulp_rte_parser_act_port_set(param, ifindex);
2307 }
2308
2309 /* Function to handle the parsing of RTE Flow action phy_port. */
2310 int32_t
2311 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
2312                              struct ulp_rte_parser_params *prm)
2313 {
2314         const struct rte_flow_action_phy_port *phy_port;
2315         uint32_t pid;
2316         int32_t rc;
2317         uint16_t pid_s;
2318         enum bnxt_ulp_direction_type dir;
2319
2320         phy_port = action_item->conf;
2321         if (!phy_port) {
2322                 BNXT_TF_DBG(ERR,
2323                             "ParseErr: Invalid Argument\n");
2324                 return BNXT_TF_RC_PARSE_ERR;
2325         }
2326
2327         if (phy_port->original) {
2328                 BNXT_TF_DBG(ERR,
2329                             "Parse Err:Port Original not supported\n");
2330                 return BNXT_TF_RC_PARSE_ERR;
2331         }
2332         dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
2333         if (dir != BNXT_ULP_DIR_EGRESS) {
2334                 BNXT_TF_DBG(ERR,
2335                             "Parse Err:Phy ports are valid only for egress\n");
2336                 return BNXT_TF_RC_PARSE_ERR;
2337         }
2338         /* Get the physical port details from port db */
2339         rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
2340                                             &pid_s);
2341         if (rc) {
2342                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
2343                 return -EINVAL;
2344         }
2345
2346         pid = pid_s;
2347         pid = rte_cpu_to_be_32(pid);
2348         memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
2349                &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
2350
2351         /* Update the action port set bit */
2352         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
2353         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
2354                             BNXT_ULP_INTF_TYPE_PHY_PORT);
2355         return BNXT_TF_RC_SUCCESS;
2356 }
2357
2358 /* Function to handle the parsing of RTE Flow action pop vlan. */
2359 int32_t
2360 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
2361                                 struct ulp_rte_parser_params *params)
2362 {
2363         /* Update the act_bitmap with pop */
2364         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_POP_VLAN);
2365         return BNXT_TF_RC_SUCCESS;
2366 }
2367
2368 /* Function to handle the parsing of RTE Flow action push vlan. */
2369 int32_t
2370 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
2371                                  struct ulp_rte_parser_params *params)
2372 {
2373         const struct rte_flow_action_of_push_vlan *push_vlan;
2374         uint16_t ethertype;
2375         struct ulp_rte_act_prop *act = &params->act_prop;
2376
2377         push_vlan = action_item->conf;
2378         if (push_vlan) {
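                /* Only the 802.1Q VLAN ethertype is supported */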
2379                 ethertype = push_vlan->ethertype;
2380                 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
2381                         BNXT_TF_DBG(ERR,
2382                                     "Parse Err: Ethertype not supported\n");
2383                         return BNXT_TF_RC_PARSE_ERR;
2384                 }
2385                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
2386                        &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
2387                 /* Update the act_bitmap with push vlan */
2388                 ULP_BITMAP_SET(params->act_bitmap.bits,
2389                                BNXT_ULP_ACT_BIT_PUSH_VLAN);
2390                 return BNXT_TF_RC_SUCCESS;
2391         }
2392         BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
2393         return BNXT_TF_RC_ERROR;
2394 }
2395
2396 /* Function to handle the parsing of RTE Flow action set vlan id. */
2397 int32_t
2398 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
2399                                     struct ulp_rte_parser_params *params)
2400 {
2401         const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
2402         uint32_t vid;
2403         struct ulp_rte_act_prop *act = &params->act_prop;
2404
2405         vlan_vid = action_item->conf;
2406         if (vlan_vid && vlan_vid->vlan_vid) {
2407                 vid = vlan_vid->vlan_vid;
2408                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
2409                        &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
2410                 /* Update the act_bitmap with vlan vid */
2411                 ULP_BITMAP_SET(params->act_bitmap.bits,
2412                                BNXT_ULP_ACT_BIT_SET_VLAN_VID);
2413                 return BNXT_TF_RC_SUCCESS;
2414         }
2415         BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
2416         return BNXT_TF_RC_ERROR;
2417 }
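/*
 * Illustrative sketch, not part of the driver: the VLAN id is passed as a
 * big-endian 16-bit value (only the low 12 bits are meaningful); note the
 * handler above treats a VID of zero as an invalid argument.  The VID
 * value here is arbitrary.
 *
 *      struct rte_flow_action_of_set_vlan_vid vid = {
 *              .vlan_vid = RTE_BE16(100),
 *      };
 *      struct rte_flow_action act = {
 *              .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID,
 *              .conf = &vid,
 *      };
 */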
2418
2419 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
2420 int32_t
2421 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
2422                                     struct ulp_rte_parser_params *params)
2423 {
2424         const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
2425         uint8_t pcp;
2426         struct ulp_rte_act_prop *act = &params->act_prop;
2427
2428         vlan_pcp = action_item->conf;
2429         if (vlan_pcp) {
2430                 pcp = vlan_pcp->vlan_pcp;
2431                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
2432                        &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
2433                 /* Update the act_bitmap with set vlan pcp */
2434                 ULP_BITMAP_SET(params->act_bitmap.bits,
2435                                BNXT_ULP_ACT_BIT_SET_VLAN_PCP);
2436                 return BNXT_TF_RC_SUCCESS;
2437         }
2438         BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
2439         return BNXT_TF_RC_ERROR;
2440 }
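/*
 * Illustrative sketch, not part of the driver: the PCP is a 3-bit
 * priority (0-7) carried as a plain uint8_t; the value here is arbitrary.
 *
 *      struct rte_flow_action_of_set_vlan_pcp pcp = { .vlan_pcp = 3 };
 *      struct rte_flow_action act = {
 *              .type = RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP,
 *              .conf = &pcp,
 *      };
 */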
2441
2442 /* Function to handle the parsing of RTE Flow action set ipv4 src.*/
2443 int32_t
2444 ulp_rte_set_ipv4_src_act_handler(const struct rte_flow_action *action_item,
2445                                  struct ulp_rte_parser_params *params)
2446 {
2447         const struct rte_flow_action_set_ipv4 *set_ipv4;
2448         struct ulp_rte_act_prop *act = &params->act_prop;
2449
2450         set_ipv4 = action_item->conf;
2451         if (set_ipv4) {
2452                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_SRC],
2453                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_SRC);
2454                 /* Update the act_bitmap with set ipv4 src */
2455                 ULP_BITMAP_SET(params->act_bitmap.bits,
2456                                BNXT_ULP_ACT_BIT_SET_IPV4_SRC);
2457                 return BNXT_TF_RC_SUCCESS;
2458         }
2459         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 src arg is invalid\n");
2460         return BNXT_TF_RC_ERROR;
2461 }
2462
2463 /* Function to handle the parsing of RTE Flow action set ipv4 dst.*/
2464 int32_t
2465 ulp_rte_set_ipv4_dst_act_handler(const struct rte_flow_action *action_item,
2466                                  struct ulp_rte_parser_params *params)
2467 {
2468         const struct rte_flow_action_set_ipv4 *set_ipv4;
2469         struct ulp_rte_act_prop *act = &params->act_prop;
2470
2471         set_ipv4 = action_item->conf;
2472         if (set_ipv4) {
2473                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_IPV4_DST],
2474                        &set_ipv4->ipv4_addr, BNXT_ULP_ACT_PROP_SZ_SET_IPV4_DST);
2475                 /* Update the act_bitmap with set ipv4 dst */
2476                 ULP_BITMAP_SET(params->act_bitmap.bits,
2477                                BNXT_ULP_ACT_BIT_SET_IPV4_DST);
2478                 return BNXT_TF_RC_SUCCESS;
2479         }
2480         BNXT_TF_DBG(ERR, "Parse Error: set ipv4 dst arg is invalid\n");
2481         return BNXT_TF_RC_ERROR;
2482 }
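/*
 * Illustrative sketch, not part of the driver: SET_IPV4_SRC and
 * SET_IPV4_DST share struct rte_flow_action_set_ipv4, whose address field
 * is expected in network byte order.  The address here is arbitrary.
 *
 *      struct rte_flow_action_set_ipv4 nat = {
 *              .ipv4_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
 *      };
 *      struct rte_flow_action act = {
 *              .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST,
 *              .conf = &nat,
 *      };
 */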
2483
2484 /* Function to handle the parsing of RTE Flow action set tp src.*/
2485 int32_t
2486 ulp_rte_set_tp_src_act_handler(const struct rte_flow_action *action_item,
2487                                struct ulp_rte_parser_params *params)
2488 {
2489         const struct rte_flow_action_set_tp *set_tp;
2490         struct ulp_rte_act_prop *act = &params->act_prop;
2491
2492         set_tp = action_item->conf;
2493         if (set_tp) {
2494                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_SRC],
2495                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_SRC);
2496                 /* Update the act_bitmap with set tp src */
2497                 ULP_BITMAP_SET(params->act_bitmap.bits,
2498                                BNXT_ULP_ACT_BIT_SET_TP_SRC);
2499                 return BNXT_TF_RC_SUCCESS;
2500         }
2501
2502         BNXT_TF_DBG(ERR, "Parse Error: set tp src arg is invalid\n");
2503         return BNXT_TF_RC_ERROR;
2504 }
2505
2506 /* Function to handle the parsing of RTE Flow action set tp dst.*/
2507 int32_t
2508 ulp_rte_set_tp_dst_act_handler(const struct rte_flow_action *action_item,
2509                                struct ulp_rte_parser_params *params)
2510 {
2511         const struct rte_flow_action_set_tp *set_tp;
2512         struct ulp_rte_act_prop *act = &params->act_prop;
2513
2514         set_tp = action_item->conf;
2515         if (set_tp) {
2516                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_TP_DST],
2517                        &set_tp->port, BNXT_ULP_ACT_PROP_SZ_SET_TP_DST);
2518                 /* Update the act_bitmap with set tp dst */
2519                 ULP_BITMAP_SET(params->act_bitmap.bits,
2520                                BNXT_ULP_ACT_BIT_SET_TP_DST);
2521                 return BNXT_TF_RC_SUCCESS;
2522         }
2523
2524                 BNXT_TF_DBG(ERR, "Parse Error: set tp dst arg is invalid\n");
2525         return BNXT_TF_RC_ERROR;
2526 }
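/*
 * Illustrative sketch, not part of the driver: SET_TP_SRC and SET_TP_DST
 * both use struct rte_flow_action_set_tp with the L4 port in network byte
 * order.  The port number here is arbitrary.
 *
 *      struct rte_flow_action_set_tp tp = { .port = RTE_BE16(4789) };
 *      struct rte_flow_action act = {
 *              .type = RTE_FLOW_ACTION_TYPE_SET_TP_DST,
 *              .conf = &tp,
 *      };
 */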
2527
2528 /* Function to handle the parsing of RTE Flow action dec ttl.*/
2529 int32_t
2530 ulp_rte_dec_ttl_act_handler(const struct rte_flow_action *act __rte_unused,
2531                             struct ulp_rte_parser_params *params)
2532 {
2533         /* Update the act_bitmap with dec ttl */
2534         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_DEC_TTL);
2535         return BNXT_TF_RC_SUCCESS;
2536 }
2537
2538 /* Function to handle the parsing of RTE Flow action JUMP */
2539 int32_t
2540 ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
2541                          struct ulp_rte_parser_params *params)
2542 {
2543         /* Update the act_bitmap with jump */
2544         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACT_BIT_JUMP);
2545         return BNXT_TF_RC_SUCCESS;
2546 }
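/*
 * Illustrative sketch, not part of the driver: DEC_TTL needs no
 * configuration, while JUMP carries the target group in
 * struct rte_flow_action_jump.  The group id here is arbitrary.
 *
 *      struct rte_flow_action_jump jump = { .group = 1 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_DEC_TTL },
 *              { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 */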
2547
2548 int32_t
2549 ulp_rte_sample_act_handler(const struct rte_flow_action *action_item,
2550                            struct ulp_rte_parser_params *params)
2551 {
2552         const struct rte_flow_action_sample *sample;
2553         int ret;
2554
2555         sample = action_item->conf;
2556
2557         /* If the SAMPLE bit is already set, this sample action is nested
2558          * within the actions of another sample action, which is not allowed.
2559          */
2560         if (ULP_BITMAP_ISSET(params->act_bitmap.bits,
2561                              BNXT_ULP_ACT_BIT_SAMPLE))
2562                 return BNXT_TF_RC_ERROR;
2563
2564         /* a sample action is only allowed as a shared action */
2565         if (!ULP_BITMAP_ISSET(params->act_bitmap.bits,
2566                               BNXT_ULP_ACT_BIT_SHARED))
2567                 return BNXT_TF_RC_ERROR;
2568
2569         /* Only a sample ratio of 1, i.e. 100%, is supported */
2570         if (sample->ratio != 1)
2571                 return BNXT_TF_RC_ERROR;
2572
2573         if (!sample->actions)
2574                 return BNXT_TF_RC_ERROR;
2575
2576         /* parse the nested actions for a sample action */
2577         ret = bnxt_ulp_rte_parser_act_parse(sample->actions, params);
2578         if (ret == BNXT_TF_RC_SUCCESS)
2579                 /* Update the act_bitmap with sample */
2580                 ULP_BITMAP_SET(params->act_bitmap.bits,
2581                                BNXT_ULP_ACT_BIT_SAMPLE);
2582
2583         return ret;
2584 }
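/*
 * Illustrative sketch, not part of the driver: per the checks above, a
 * sample action is accepted only with ratio 1 (100%), with a non-NULL
 * nested action list, and only while the SHARED bit is set in act_bitmap,
 * i.e. when it is parsed as part of a shared (indirect) action.  The
 * nested PORT_ID action and its id are arbitrary examples.
 *
 *      struct rte_flow_action_port_id pid = { .id = 2 };
 *      struct rte_flow_action nested[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &pid },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow_action_sample conf = {
 *              .ratio = 1,     /* only 100% sampling is accepted */
 *              .actions = nested,
 *      };
 */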
2585
2586 /* Function to handle the parsing of the bnxt vendor RTE Flow action vxlan decap. */
2587 int32_t
2588 ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
2589                                    struct ulp_rte_parser_params *params)
2590 {
2591         /* Set the F1 flow header bit */
2592         ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
2593         return ulp_rte_vxlan_decap_act_handler(action_item, params);
2594 }
2595
2596 /* Function to handle the parsing of the bnxt vendor RTE Flow item vxlan decap header. */
2597 int32_t
2598 ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
2599                                        struct ulp_rte_parser_params *params)
2600 {
2601         RTE_SET_USED(item);
2602         /* Set the F2 flow header bit */
2603         ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
2604         return ulp_rte_vxlan_decap_act_handler(NULL, params);
2605 }