net/bnxt: update compute field list and access macros
[dpdk.git] / drivers / net / bnxt / tf_ulp / ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
12 #include "tfp.h"
13 #include "ulp_port_db.h"
14
15 /* Utility function to skip the void items. */
16 static inline int32_t
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
18 {
19         if (!*item)
20                 return 0;
21         if (increment)
22                 (*item)++;
23         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
24                 (*item)++;
25         if (*item)
26                 return 1;
27         return 0;
28 }
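/*
 * Note: callers such as the vxlan_encap action handler below walk an item
 * list with this helper; with increment=0 it only skips any leading VOID
 * items, with increment=1 it first steps past the current item. A zero
 * return means the list is exhausted.
 */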
29
30 /* Utility function to update the field_bitmap */
31 static void
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
33                                    uint32_t idx)
34 {
35         struct ulp_rte_hdr_field *field;
36
37         field = &params->hdr_field[idx];
38         if (ulp_bitmap_notzero(field->mask, field->size)) {
39                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
40                 /* Not exact match */
41                 if (!ulp_bitmap_is_ones(field->mask, field->size))
42                         ULP_BITMAP_SET(params->fld_bitmap.bits,
43                                        BNXT_ULP_MATCH_TYPE_BITMASK_WM);
44         } else {
45                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
46         }
47 }
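/*
 * If a field mask is non-zero the field participates in the match and its
 * bit is set in fld_bitmap; a mask that is not all-ones additionally marks
 * the flow as a wildcard match (BNXT_ULP_MATCH_TYPE_BITMASK_WM) rather than
 * an exact match.
 */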
48
49 /* Utility function to copy field spec items */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
52                         const void *buffer,
53                         uint32_t size)
54 {
55         field->size = size;
56         memcpy(field->spec, buffer, field->size);
57         field++;
58         return field;
59 }
60
61 /* Utility function to copy field masks items */
62 static void
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
64                        uint32_t *idx,
65                        const void *buffer,
66                        uint32_t size)
67 {
68         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
69
70         memcpy(field->mask, buffer, size);
71         ulp_rte_parser_field_bitmap_update(params, *idx);
72         *idx = *idx + 1;
73 }
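/*
 * The spec and mask copies advance in lockstep: ulp_rte_parser_fld_copy()
 * returns a pointer to the next hdr_field for the following spec value,
 * while ulp_rte_prsr_mask_copy() increments the caller's field index, so
 * spec and mask entries always describe the same protocol field.
 */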
74
75 /*
76  * Function to handle the parsing of RTE Flows and placing
77  * the RTE flow items into the ulp structures.
78  */
79 int32_t
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81                               struct ulp_rte_parser_params *params)
82 {
83         const struct rte_flow_item *item = pattern;
84         struct bnxt_ulp_rte_hdr_info *hdr_info;
85
86         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87         if (params->dir == ULP_DIR_EGRESS)
88                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
90
91         /* Parse all the items in the pattern */
92         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
93                 /* get the header information from the flow_hdr_info table */
94                 hdr_info = &ulp_hdr_info[item->type];
95                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
96                         BNXT_TF_DBG(ERR,
97                                     "Truflow parser does not support type %d\n",
98                                     item->type);
99                         return BNXT_TF_RC_PARSE_ERR;
100                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
101                         /* call the registered callback handler */
102                         if (hdr_info->proto_hdr_func) {
103                                 if (hdr_info->proto_hdr_func(item, params) !=
104                                     BNXT_TF_RC_SUCCESS) {
105                                         return BNXT_TF_RC_ERROR;
106                                 }
107                         }
108                 }
109                 item++;
110         }
111         /* update the implied SVIF */
112         (void)ulp_rte_parser_svif_process(params);
113         return BNXT_TF_RC_SUCCESS;
114 }
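/*
 * Rough walk-through (illustrative): for a pattern of ETH / IPV4 / UDP /
 * END, the loop above dispatches ulp_rte_eth_hdr_handler(),
 * ulp_rte_ipv4_hdr_handler() and ulp_rte_udp_hdr_handler() in turn; each
 * copies its spec/mask into params->hdr_field starting at
 * params->field_idx and sets the corresponding bits in params->hdr_bitmap.
 */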
115
116 /*
117  * Function to handle the parsing of RTE Flows and placing
118  * the RTE flow actions into the ulp structures.
119  */
120 int32_t
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122                               struct ulp_rte_parser_params *params)
123 {
124         const struct rte_flow_action *action_item = actions;
125         struct bnxt_ulp_rte_act_info *hdr_info;
126
127         /* Parse all the actions in the action list */
128         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
129                 /* get the action information from the ulp_act_info table */
130                 hdr_info = &ulp_act_info[action_item->type];
131                 if (hdr_info->act_type ==
132                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
133                         BNXT_TF_DBG(ERR,
134                                     "Truflow parser does not support act %u\n",
135                                     action_item->type);
136                         return BNXT_TF_RC_ERROR;
137                 } else if (hdr_info->act_type ==
138                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
139                         /* call the registered callback handler */
140                         if (hdr_info->proto_act_func) {
141                                 if (hdr_info->proto_act_func(action_item,
142                                                              params) !=
143                                     BNXT_TF_RC_SUCCESS) {
144                                         return BNXT_TF_RC_ERROR;
145                                 }
146                         }
147                 }
148                 action_item++;
149         }
150         /* update the implied VNIC */
151         ulp_rte_parser_vnic_process(params);
152         return BNXT_TF_RC_SUCCESS;
153 }
154
155 /* Utility function to update the SVIF spec and mask in the parser params. */
156 static int32_t
157 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
158                         enum rte_flow_item_type proto,
159                         uint16_t svif,
160                         uint16_t mask)
161 {
162         uint16_t port_id = svif;
163         uint32_t dir = 0;
164         struct ulp_rte_hdr_field *hdr_field;
165         uint32_t ifindex;
166         int32_t rc;
167
168         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
169             BNXT_ULP_INVALID_SVIF_VAL) {
170                 BNXT_TF_DBG(ERR,
171                             "SVIF already set, multiple sources not supported\n");
172                 return BNXT_TF_RC_ERROR;
173         }
174
175         if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
176                 dir = ULP_COMP_FLD_IDX_RD(params,
177                                           BNXT_ULP_CF_IDX_DIRECTION);
178                 /* perform the conversion from dpdk port to bnxt svif */
179                 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
180                                                        &ifindex);
181                 if (rc) {
182                         BNXT_TF_DBG(ERR,
183                                     "Invalid port id\n");
184                         return BNXT_TF_RC_ERROR;
185                 }
186                 ulp_port_db_svif_get(params->ulp_ctx, ifindex, dir, &svif);
187                 svif = rte_cpu_to_be_16(svif);
188         }
189         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
190         memcpy(hdr_field->spec, &svif, sizeof(svif));
191         memcpy(hdr_field->mask, &mask, sizeof(mask));
192         hdr_field->size = sizeof(svif);
193         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
194                             rte_be_to_cpu_16(svif));
195         return BNXT_TF_RC_SUCCESS;
196 }
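/*
 * SVIF here is the source virtual interface the hardware uses to identify
 * the ingress port. For PORT_ID items the DPDK port id is first resolved
 * to a ulp interface index through the port database and then to the
 * device SVIF for the flow direction; the value is stored big-endian in
 * the SVIF header field.
 */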
197
198 /* Function to handle the implicit SVIF derived from the RTE port id */
199 int32_t
200 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
201 {
202         uint16_t port_id = 0;
203         uint16_t svif_mask = 0xFFFF;
204
205         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
206             BNXT_ULP_INVALID_SVIF_VAL)
207                 return BNXT_TF_RC_SUCCESS;
208
209         /* SVIF not set. So get the port id */
210         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
211
212         /* Update the SVIF details */
213         return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
214                                        port_id, svif_mask);
215 }
216
217 /* Function to handle the implicit VNIC when no destination action is given */
218 int32_t
219 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
220 {
221         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
222
223         if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
224             ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
225                 return BNXT_TF_RC_SUCCESS;
226
227         /* Update the vnic details */
228         ulp_rte_pf_act_handler(NULL, params);
229         return BNXT_TF_RC_SUCCESS;
230 }
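/*
 * If the action list did not include an explicit destination (neither the
 * VNIC nor the VPORT action bit is set), the flow defaults to the PF
 * handler, which steers the traffic to the VNIC of the incoming interface.
 */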
231
232 /* Function to handle the parsing of RTE Flow item PF Header. */
233 int32_t
234 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
235                        struct ulp_rte_parser_params *params)
236 {
237         uint16_t port_id = 0;
238         uint16_t svif_mask = 0xFFFF;
239
240         /* Get the port id */
241         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
242
243         /* Update the SVIF details */
244         return ulp_rte_parser_svif_set(params,
245                                        item->type,
246                                        port_id, svif_mask);
247 }
248
249 /* Function to handle the parsing of RTE Flow item VF Header. */
250 int32_t
251 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
252                        struct ulp_rte_parser_params *params)
253 {
254         const struct rte_flow_item_vf *vf_spec = item->spec;
255         const struct rte_flow_item_vf *vf_mask = item->mask;
256         uint16_t svif = 0, mask = 0;
257
258         /* Get VF rte_flow_item for Port details */
259         if (vf_spec)
260                 svif = (uint16_t)vf_spec->id;
261         if (vf_mask)
262                 mask = (uint16_t)vf_mask->id;
263
264         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
265 }
266
267 /* Function to handle the parsing of RTE Flow item port id Header. */
268 int32_t
269 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
270                             struct ulp_rte_parser_params *params)
271 {
272         const struct rte_flow_item_port_id *port_spec = item->spec;
273         const struct rte_flow_item_port_id *port_mask = item->mask;
274         uint16_t svif = 0, mask = 0;
275
276         /*
277          * Copy the rte_flow_item for Port into hdr_field using port id
278          * header fields.
279          */
280         if (port_spec)
281                 svif = (uint16_t)port_spec->id;
282         if (port_mask)
283                 mask = (uint16_t)port_mask->id;
284
285         /* Update the SVIF details */
286         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
287 }
288
289 /* Function to handle the parsing of RTE Flow item phy port Header. */
290 int32_t
291 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
292                              struct ulp_rte_parser_params *params)
293 {
294         const struct rte_flow_item_phy_port *port_spec = item->spec;
295         const struct rte_flow_item_phy_port *port_mask = item->mask;
296         uint32_t svif = 0, mask = 0;
297
298         /* Copy the rte_flow_item for phy port into hdr_field */
299         if (port_spec)
300                 svif = port_spec->index;
301         if (port_mask)
302                 mask = port_mask->index;
303
304         /* Update the SVIF details */
305         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
306 }
307
308 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
309 int32_t
310 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
311                         struct ulp_rte_parser_params *params)
312 {
313         const struct rte_flow_item_eth *eth_spec = item->spec;
314         const struct rte_flow_item_eth *eth_mask = item->mask;
315         struct ulp_rte_hdr_field *field;
316         uint32_t idx = params->field_idx;
317         uint64_t set_flag = 0;
318         uint32_t size;
319
320         /*
321          * Copy the rte_flow_item for eth into hdr_field using ethernet
322          * header fields
323          */
324         if (eth_spec) {
325                 size = sizeof(eth_spec->dst.addr_bytes);
326                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
327                                                 eth_spec->dst.addr_bytes,
328                                                 size);
329                 size = sizeof(eth_spec->src.addr_bytes);
330                 field = ulp_rte_parser_fld_copy(field,
331                                                 eth_spec->src.addr_bytes,
332                                                 size);
333                 field = ulp_rte_parser_fld_copy(field,
334                                                 &eth_spec->type,
335                                                 sizeof(eth_spec->type));
336         }
337         if (eth_mask) {
338                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
339                                        sizeof(eth_mask->dst.addr_bytes));
340                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
341                                        sizeof(eth_mask->src.addr_bytes));
342                 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
343                                        sizeof(eth_mask->type));
344         }
345         /* Add number of eth header elements and reserve the vlan field slots */
346         params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
347         params->vlan_idx = params->field_idx;
348         params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
349
350         /* Update the hdr_bitmap with BNXT_ULP_HDR_BIT_I_ETH */
351         set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
352                                     BNXT_ULP_HDR_BIT_O_ETH);
353         if (set_flag)
354                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
355         else
356                 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
357                                  BNXT_ULP_HDR_BIT_I_ETH);
358
359         /* update the hdr_bitmap with BNXT_ULP_HDR_BIT_O_ETH */
360         ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
361
362         return BNXT_TF_RC_SUCCESS;
363 }
364
365 /* Function to handle the parsing of RTE Flow item Vlan Header. */
366 int32_t
367 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
368                          struct ulp_rte_parser_params *params)
369 {
370         const struct rte_flow_item_vlan *vlan_spec = item->spec;
371         const struct rte_flow_item_vlan *vlan_mask = item->mask;
372         struct ulp_rte_hdr_field *field;
373         struct ulp_rte_hdr_bitmap *hdr_bit;
374         uint32_t idx = params->vlan_idx;
375         uint16_t vlan_tag, priority;
376         uint32_t outer_vtag_num;
377         uint32_t inner_vtag_num;
378
379         /*
380          * Copy the rte_flow_item for vlan into hdr_field using Vlan
381          * header fields
382          */
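        /*
         * The 16-bit TCI is split below into a 3-bit priority (PCP) and a
         * 12-bit VLAN id: e.g. (illustrative) a TCI of 0xE00A yields
         * priority 7 and vlan_tag 0x00A. Both halves are converted back to
         * network byte order before being copied into hdr_field.
         */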
383         if (vlan_spec) {
384                 vlan_tag = ntohs(vlan_spec->tci);
385                 priority = htons(vlan_tag >> 13);
386                 vlan_tag &= 0xfff;
387                 vlan_tag = htons(vlan_tag);
388
389                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
390                                                 &priority,
391                                                 sizeof(priority));
392                 field = ulp_rte_parser_fld_copy(field,
393                                                 &vlan_tag,
394                                                 sizeof(vlan_tag));
395                 field = ulp_rte_parser_fld_copy(field,
396                                                 &vlan_spec->inner_type,
397                                                 sizeof(vlan_spec->inner_type));
398         }
399
400         if (vlan_mask) {
401                 vlan_tag = ntohs(vlan_mask->tci);
402                 priority = htons(vlan_tag >> 13);
403                 vlan_tag &= 0xfff;
404                 vlan_tag = htons(vlan_tag);
405
406                 field = &params->hdr_field[idx];
407                 memcpy(field->mask, &priority, field->size);
408                 field++;
409                 memcpy(field->mask, &vlan_tag, field->size);
410                 field++;
411                 memcpy(field->mask, &vlan_mask->inner_type, field->size);
412         }
413         /* Set the vlan index to new incremented value */
414         params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
415
416         /* Get the outer tag and inner tag counts */
417         outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
418                                              BNXT_ULP_CF_IDX_O_VTAG_NUM);
419         inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
420                                              BNXT_ULP_CF_IDX_I_VTAG_NUM);
421
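        /*
         * The chain below classifies this VLAN tag (roughly): first tag on
         * the outer ETH -> O_VTAG_NUM = 1; second tag on the outer ETH ->
         * O_TWO_VTAGS; first tag after an inner ETH -> I_VTAG_NUM = 1;
         * second inner tag -> I_TWO_VTAGS. A VLAN item without a preceding
         * ETH item is rejected.
         */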
422         /* Update the hdr_bitmap of the vlans */
423         hdr_bit = &params->hdr_bitmap;
424         if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
425             !outer_vtag_num) {
426                 /* Update the vlan tag num */
427                 outer_vtag_num++;
428                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
429                                     outer_vtag_num);
430                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_PRESENT, 1);
431         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
432                    ULP_COMP_FLD_IDX_RD(params,
433                                        BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
434                    outer_vtag_num == 1) {
435                 /* update the vlan tag num */
436                 outer_vtag_num++;
437                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
438                                     outer_vtag_num);
439                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
440         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
441                    ULP_COMP_FLD_IDX_RD(params,
442                                        BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
443                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
444                    !inner_vtag_num) {
445                 /* update the vlan tag num */
446                 inner_vtag_num++;
447                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
448                                     inner_vtag_num);
449                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_PRESENT, 1);
450         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
451                    ULP_COMP_FLD_IDX_RD(params,
452                                        BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
453                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
454                    ULP_COMP_FLD_IDX_RD(params,
455                                        BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
456                    inner_vtag_num == 1) {
457                 /* update the vlan tag num */
458                 inner_vtag_num++;
459                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
460                                     inner_vtag_num);
461                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
462         } else {
463                 BNXT_TF_DBG(ERR, "Error Parsing: Vlan hdr found without eth\n");
464                 return BNXT_TF_RC_ERROR;
465         }
466         return BNXT_TF_RC_SUCCESS;
467 }
468
469 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
470 int32_t
471 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
472                          struct ulp_rte_parser_params *params)
473 {
474         const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
475         const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
476         struct ulp_rte_hdr_field *field;
477         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
478         uint32_t idx = params->field_idx;
479         uint32_t size;
480         uint32_t inner_l3, outer_l3;
481
482         inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
483         if (inner_l3) {
484                 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
485                 return BNXT_TF_RC_ERROR;
486         }
487
488         /*
489          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
490          * header fields
491          */
492         if (ipv4_spec) {
493                 size = sizeof(ipv4_spec->hdr.version_ihl);
494                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
495                                                 &ipv4_spec->hdr.version_ihl,
496                                                 size);
497                 size = sizeof(ipv4_spec->hdr.type_of_service);
498                 field = ulp_rte_parser_fld_copy(field,
499                                                 &ipv4_spec->hdr.type_of_service,
500                                                 size);
501                 size = sizeof(ipv4_spec->hdr.total_length);
502                 field = ulp_rte_parser_fld_copy(field,
503                                                 &ipv4_spec->hdr.total_length,
504                                                 size);
505                 size = sizeof(ipv4_spec->hdr.packet_id);
506                 field = ulp_rte_parser_fld_copy(field,
507                                                 &ipv4_spec->hdr.packet_id,
508                                                 size);
509                 size = sizeof(ipv4_spec->hdr.fragment_offset);
510                 field = ulp_rte_parser_fld_copy(field,
511                                                 &ipv4_spec->hdr.fragment_offset,
512                                                 size);
513                 size = sizeof(ipv4_spec->hdr.time_to_live);
514                 field = ulp_rte_parser_fld_copy(field,
515                                                 &ipv4_spec->hdr.time_to_live,
516                                                 size);
517                 size = sizeof(ipv4_spec->hdr.next_proto_id);
518                 field = ulp_rte_parser_fld_copy(field,
519                                                 &ipv4_spec->hdr.next_proto_id,
520                                                 size);
521                 size = sizeof(ipv4_spec->hdr.hdr_checksum);
522                 field = ulp_rte_parser_fld_copy(field,
523                                                 &ipv4_spec->hdr.hdr_checksum,
524                                                 size);
525                 size = sizeof(ipv4_spec->hdr.src_addr);
526                 field = ulp_rte_parser_fld_copy(field,
527                                                 &ipv4_spec->hdr.src_addr,
528                                                 size);
529                 size = sizeof(ipv4_spec->hdr.dst_addr);
530                 field = ulp_rte_parser_fld_copy(field,
531                                                 &ipv4_spec->hdr.dst_addr,
532                                                 size);
533         }
534         if (ipv4_mask) {
535                 ulp_rte_prsr_mask_copy(params, &idx,
536                                        &ipv4_mask->hdr.version_ihl,
537                                        sizeof(ipv4_mask->hdr.version_ihl));
538                 ulp_rte_prsr_mask_copy(params, &idx,
539                                        &ipv4_mask->hdr.type_of_service,
540                                        sizeof(ipv4_mask->hdr.type_of_service));
541                 ulp_rte_prsr_mask_copy(params, &idx,
542                                        &ipv4_mask->hdr.total_length,
543                                        sizeof(ipv4_mask->hdr.total_length));
544                 ulp_rte_prsr_mask_copy(params, &idx,
545                                        &ipv4_mask->hdr.packet_id,
546                                        sizeof(ipv4_mask->hdr.packet_id));
547                 ulp_rte_prsr_mask_copy(params, &idx,
548                                        &ipv4_mask->hdr.fragment_offset,
549                                        sizeof(ipv4_mask->hdr.fragment_offset));
550                 ulp_rte_prsr_mask_copy(params, &idx,
551                                        &ipv4_mask->hdr.time_to_live,
552                                        sizeof(ipv4_mask->hdr.time_to_live));
553                 ulp_rte_prsr_mask_copy(params, &idx,
554                                        &ipv4_mask->hdr.next_proto_id,
555                                        sizeof(ipv4_mask->hdr.next_proto_id));
556                 ulp_rte_prsr_mask_copy(params, &idx,
557                                        &ipv4_mask->hdr.hdr_checksum,
558                                        sizeof(ipv4_mask->hdr.hdr_checksum));
559                 ulp_rte_prsr_mask_copy(params, &idx,
560                                        &ipv4_mask->hdr.src_addr,
561                                        sizeof(ipv4_mask->hdr.src_addr));
562                 ulp_rte_prsr_mask_copy(params, &idx,
563                                        &ipv4_mask->hdr.dst_addr,
564                                        sizeof(ipv4_mask->hdr.dst_addr));
565         }
566         /* Add the number of ipv4 header elements */
567         params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
568
569         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
570         outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
571         if (outer_l3 ||
572             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
573             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
574                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
575                 inner_l3++;
576                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
577         } else {
578                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
579                 outer_l3++;
580                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
581         }
582         return BNXT_TF_RC_SUCCESS;
583 }
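/*
 * Inner vs. outer L3 classification: if an outer L3 header (IPv4 or IPv6)
 * has already been seen, this header is recorded as the inner one
 * (I_IPV4/I_IPV6 and CF_IDX_I_L3); otherwise it becomes the outer header.
 * A third L3 header is rejected up front.
 */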
584
585 /* Function to handle the parsing of RTE Flow item IPV6 Header */
586 int32_t
587 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
588                          struct ulp_rte_parser_params *params)
589 {
590         const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
591         const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
592         struct ulp_rte_hdr_field *field;
593         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
594         uint32_t idx = params->field_idx;
595         uint32_t size;
596         uint32_t inner_l3, outer_l3;
597
598         inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
599         if (inner_l3) {
600                 BNXT_TF_DBG(ERR, "Parse Error: third L3 header not supported\n");
601                 return BNXT_TF_RC_ERROR;
602         }
603
604         /*
605          * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
606          * header fields
607          */
608         if (ipv6_spec) {
609                 size = sizeof(ipv6_spec->hdr.vtc_flow);
610                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
611                                                 &ipv6_spec->hdr.vtc_flow,
612                                                 size);
613                 size = sizeof(ipv6_spec->hdr.payload_len);
614                 field = ulp_rte_parser_fld_copy(field,
615                                                 &ipv6_spec->hdr.payload_len,
616                                                 size);
617                 size = sizeof(ipv6_spec->hdr.proto);
618                 field = ulp_rte_parser_fld_copy(field,
619                                                 &ipv6_spec->hdr.proto,
620                                                 size);
621                 size = sizeof(ipv6_spec->hdr.hop_limits);
622                 field = ulp_rte_parser_fld_copy(field,
623                                                 &ipv6_spec->hdr.hop_limits,
624                                                 size);
625                 size = sizeof(ipv6_spec->hdr.src_addr);
626                 field = ulp_rte_parser_fld_copy(field,
627                                                 &ipv6_spec->hdr.src_addr,
628                                                 size);
629                 size = sizeof(ipv6_spec->hdr.dst_addr);
630                 field = ulp_rte_parser_fld_copy(field,
631                                                 &ipv6_spec->hdr.dst_addr,
632                                                 size);
633         }
634         if (ipv6_mask) {
635                 ulp_rte_prsr_mask_copy(params, &idx,
636                                        &ipv6_mask->hdr.vtc_flow,
637                                        sizeof(ipv6_mask->hdr.vtc_flow));
638                 ulp_rte_prsr_mask_copy(params, &idx,
639                                        &ipv6_mask->hdr.payload_len,
640                                        sizeof(ipv6_mask->hdr.payload_len));
641                 ulp_rte_prsr_mask_copy(params, &idx,
642                                        &ipv6_mask->hdr.proto,
643                                        sizeof(ipv6_mask->hdr.proto));
644                 ulp_rte_prsr_mask_copy(params, &idx,
645                                        &ipv6_mask->hdr.hop_limits,
646                                        sizeof(ipv6_mask->hdr.hop_limits));
647                 ulp_rte_prsr_mask_copy(params, &idx,
648                                        &ipv6_mask->hdr.src_addr,
649                                        sizeof(ipv6_mask->hdr.src_addr));
650                 ulp_rte_prsr_mask_copy(params, &idx,
651                                        &ipv6_mask->hdr.dst_addr,
652                                        sizeof(ipv6_mask->hdr.dst_addr));
653         }
654         /* add number of ipv6 header elements */
655         params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
656
657         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
658         outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
659         if (outer_l3 ||
660             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
661             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
662                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
663                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
664         } else {
665                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
666                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
667         }
668         return BNXT_TF_RC_SUCCESS;
669 }
670
671 /* Function to handle the parsing of RTE Flow item UDP Header. */
672 int32_t
673 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
674                         struct ulp_rte_parser_params *params)
675 {
676         const struct rte_flow_item_udp *udp_spec = item->spec;
677         const struct rte_flow_item_udp *udp_mask = item->mask;
678         struct ulp_rte_hdr_field *field;
679         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
680         uint32_t idx = params->field_idx;
681         uint32_t size;
682         uint32_t inner_l4, outer_l4;
683
684         inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
685         if (inner_l4) {
686                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
687                 return BNXT_TF_RC_ERROR;
688         }
689
690         /*
691          * Copy the rte_flow_item for udp into hdr_field using udp
692          * header fields
693          */
694         if (udp_spec) {
695                 size = sizeof(udp_spec->hdr.src_port);
696                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
697                                                 &udp_spec->hdr.src_port,
698                                                 size);
699                 size = sizeof(udp_spec->hdr.dst_port);
700                 field = ulp_rte_parser_fld_copy(field,
701                                                 &udp_spec->hdr.dst_port,
702                                                 size);
703                 size = sizeof(udp_spec->hdr.dgram_len);
704                 field = ulp_rte_parser_fld_copy(field,
705                                                 &udp_spec->hdr.dgram_len,
706                                                 size);
707                 size = sizeof(udp_spec->hdr.dgram_cksum);
708                 field = ulp_rte_parser_fld_copy(field,
709                                                 &udp_spec->hdr.dgram_cksum,
710                                                 size);
711         }
712         if (udp_mask) {
713                 ulp_rte_prsr_mask_copy(params, &idx,
714                                        &udp_mask->hdr.src_port,
715                                        sizeof(udp_mask->hdr.src_port));
716                 ulp_rte_prsr_mask_copy(params, &idx,
717                                        &udp_mask->hdr.dst_port,
718                                        sizeof(udp_mask->hdr.dst_port));
719                 ulp_rte_prsr_mask_copy(params, &idx,
720                                        &udp_mask->hdr.dgram_len,
721                                        sizeof(udp_mask->hdr.dgram_len));
722                 ulp_rte_prsr_mask_copy(params, &idx,
723                                        &udp_mask->hdr.dgram_cksum,
724                                        sizeof(udp_mask->hdr.dgram_cksum));
725         }
726
727         /* Add number of UDP header elements */
728         params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
729
730         /* Set the udp header bitmap and computed l4 header bitmaps */
731         outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
732         if (outer_l4 ||
733             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
734             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
735                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
736                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
737         } else {
738                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
739                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
740         }
741         return BNXT_TF_RC_SUCCESS;
742 }
743
744 /* Function to handle the parsing of RTE Flow item TCP Header. */
745 int32_t
746 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
747                         struct ulp_rte_parser_params *params)
748 {
749         const struct rte_flow_item_tcp *tcp_spec = item->spec;
750         const struct rte_flow_item_tcp *tcp_mask = item->mask;
751         struct ulp_rte_hdr_field *field;
752         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
753         uint32_t idx = params->field_idx;
754         uint32_t size;
755         uint32_t inner_l4, outer_l4;
756
757         inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
758         if (inner_l4) {
759                 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
760                 return BNXT_TF_RC_ERROR;
761         }
762
763         /*
764          * Copy the rte_flow_item for tcp into hdr_field using tcp
765          * header fields
766          */
767         if (tcp_spec) {
768                 size = sizeof(tcp_spec->hdr.src_port);
769                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
770                                                 &tcp_spec->hdr.src_port,
771                                                 size);
772                 size = sizeof(tcp_spec->hdr.dst_port);
773                 field = ulp_rte_parser_fld_copy(field,
774                                                 &tcp_spec->hdr.dst_port,
775                                                 size);
776                 size = sizeof(tcp_spec->hdr.sent_seq);
777                 field = ulp_rte_parser_fld_copy(field,
778                                                 &tcp_spec->hdr.sent_seq,
779                                                 size);
780                 size = sizeof(tcp_spec->hdr.recv_ack);
781                 field = ulp_rte_parser_fld_copy(field,
782                                                 &tcp_spec->hdr.recv_ack,
783                                                 size);
784                 size = sizeof(tcp_spec->hdr.data_off);
785                 field = ulp_rte_parser_fld_copy(field,
786                                                 &tcp_spec->hdr.data_off,
787                                                 size);
788                 size = sizeof(tcp_spec->hdr.tcp_flags);
789                 field = ulp_rte_parser_fld_copy(field,
790                                                 &tcp_spec->hdr.tcp_flags,
791                                                 size);
792                 size = sizeof(tcp_spec->hdr.rx_win);
793                 field = ulp_rte_parser_fld_copy(field,
794                                                 &tcp_spec->hdr.rx_win,
795                                                 size);
796                 size = sizeof(tcp_spec->hdr.cksum);
797                 field = ulp_rte_parser_fld_copy(field,
798                                                 &tcp_spec->hdr.cksum,
799                                                 size);
800                 size = sizeof(tcp_spec->hdr.tcp_urp);
801                 field = ulp_rte_parser_fld_copy(field,
802                                                 &tcp_spec->hdr.tcp_urp,
803                                                 size);
804         } else {
805                 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
806         }
807
808         if (tcp_mask) {
809                 ulp_rte_prsr_mask_copy(params, &idx,
810                                        &tcp_mask->hdr.src_port,
811                                        sizeof(tcp_mask->hdr.src_port));
812                 ulp_rte_prsr_mask_copy(params, &idx,
813                                        &tcp_mask->hdr.dst_port,
814                                        sizeof(tcp_mask->hdr.dst_port));
815                 ulp_rte_prsr_mask_copy(params, &idx,
816                                        &tcp_mask->hdr.sent_seq,
817                                        sizeof(tcp_mask->hdr.sent_seq));
818                 ulp_rte_prsr_mask_copy(params, &idx,
819                                        &tcp_mask->hdr.recv_ack,
820                                        sizeof(tcp_mask->hdr.recv_ack));
821                 ulp_rte_prsr_mask_copy(params, &idx,
822                                        &tcp_mask->hdr.data_off,
823                                        sizeof(tcp_mask->hdr.data_off));
824                 ulp_rte_prsr_mask_copy(params, &idx,
825                                        &tcp_mask->hdr.tcp_flags,
826                                        sizeof(tcp_mask->hdr.tcp_flags));
827                 ulp_rte_prsr_mask_copy(params, &idx,
828                                        &tcp_mask->hdr.rx_win,
829                                        sizeof(tcp_mask->hdr.rx_win));
830                 ulp_rte_prsr_mask_copy(params, &idx,
831                                        &tcp_mask->hdr.cksum,
832                                        sizeof(tcp_mask->hdr.cksum));
833                 ulp_rte_prsr_mask_copy(params, &idx,
834                                        &tcp_mask->hdr.tcp_urp,
835                                        sizeof(tcp_mask->hdr.tcp_urp));
836         }
837         /* add number of TCP header elements */
838         params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
839
840         /* Set the tcp header bitmap and computed l4 header bitmaps */
841         outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
842         if (outer_l4 ||
843             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
844             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
845                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
846                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
847         } else {
848                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
849                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
850         }
851         return BNXT_TF_RC_SUCCESS;
852 }
853
854 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
855 int32_t
856 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
857                           struct ulp_rte_parser_params *params)
858 {
859         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
860         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
861         struct ulp_rte_hdr_field *field;
862         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
863         uint32_t idx = params->field_idx;
864         uint32_t size;
865
866         /*
867          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
868          * header fields
869          */
870         if (vxlan_spec) {
871                 size = sizeof(vxlan_spec->flags);
872                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
873                                                 &vxlan_spec->flags,
874                                                 size);
875                 size = sizeof(vxlan_spec->rsvd0);
876                 field = ulp_rte_parser_fld_copy(field,
877                                                 &vxlan_spec->rsvd0,
878                                                 size);
879                 size = sizeof(vxlan_spec->vni);
880                 field = ulp_rte_parser_fld_copy(field,
881                                                 &vxlan_spec->vni,
882                                                 size);
883                 size = sizeof(vxlan_spec->rsvd1);
884                 field = ulp_rte_parser_fld_copy(field,
885                                                 &vxlan_spec->rsvd1,
886                                                 size);
887         }
888         if (vxlan_mask) {
889                 ulp_rte_prsr_mask_copy(params, &idx,
890                                        &vxlan_mask->flags,
891                                        sizeof(vxlan_mask->flags));
892                 ulp_rte_prsr_mask_copy(params, &idx,
893                                        &vxlan_mask->rsvd0,
894                                        sizeof(vxlan_mask->rsvd0));
895                 ulp_rte_prsr_mask_copy(params, &idx,
896                                        &vxlan_mask->vni,
897                                        sizeof(vxlan_mask->vni));
898                 ulp_rte_prsr_mask_copy(params, &idx,
899                                        &vxlan_mask->rsvd1,
900                                        sizeof(vxlan_mask->rsvd1));
901         }
902         /* Add number of vxlan header elements */
903         params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
904
905         /* Update the hdr_bitmap with vxlan */
906         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
907         return BNXT_TF_RC_SUCCESS;
908 }
909
910 /* Function to handle the parsing of RTE Flow item void Header */
911 int32_t
912 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
913                          struct ulp_rte_parser_params *params __rte_unused)
914 {
915         return BNXT_TF_RC_SUCCESS;
916 }
917
918 /* Function to handle the parsing of RTE Flow action void Header. */
919 int32_t
920 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
921                          struct ulp_rte_parser_params *params __rte_unused)
922 {
923         return BNXT_TF_RC_SUCCESS;
924 }
925
926 /* Function to handle the parsing of RTE Flow action Mark Header. */
927 int32_t
928 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
929                          struct ulp_rte_parser_params *param)
930 {
931         const struct rte_flow_action_mark *mark;
932         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
933         uint32_t mark_id;
934
935         mark = action_item->conf;
936         if (mark) {
937                 mark_id = tfp_cpu_to_be_32(mark->id);
938                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
939                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
940
941                 /* Update the act_bitmap with mark */
942                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
943                 return BNXT_TF_RC_SUCCESS;
944         }
945         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
946         return BNXT_TF_RC_ERROR;
947 }
948
949 /* Function to handle the parsing of RTE Flow action RSS Header. */
950 int32_t
951 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
952                         struct ulp_rte_parser_params *param)
953 {
954         const struct rte_flow_action_rss *rss = action_item->conf;
955
956         if (rss) {
957                 /* Update the act_bitmap with rss */
958                 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
959                 return BNXT_TF_RC_SUCCESS;
960         }
961         BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
962         return BNXT_TF_RC_ERROR;
963 }
964
965 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
966 int32_t
967 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
968                                 struct ulp_rte_parser_params *params)
969 {
970         const struct rte_flow_action_vxlan_encap *vxlan_encap;
971         const struct rte_flow_item *item;
972         const struct rte_flow_item_eth *eth_spec;
973         const struct rte_flow_item_ipv4 *ipv4_spec;
974         const struct rte_flow_item_ipv6 *ipv6_spec;
975         struct rte_flow_item_vxlan vxlan_spec;
976         uint32_t vlan_num = 0, vlan_size = 0;
977         uint32_t ip_size = 0, ip_type = 0;
978         uint32_t vxlan_size = 0;
979         uint8_t *buff;
980         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
981         const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
982                                     0x00, 0x40, 0x11};
983         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
984         struct ulp_rte_act_prop *ap = &params->act_prop;
985
986         vxlan_encap = action_item->conf;
987         if (!vxlan_encap) {
988                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
989                 return BNXT_TF_RC_ERROR;
990         }
991
992         item = vxlan_encap->definition;
993         if (!item) {
994                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
995                 return BNXT_TF_RC_ERROR;
996         }
997
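        /*
         * The encap definition is expected to describe the full tunnel
         * header sequence, roughly: ETH, optionally one or two VLANs,
         * IPv4 or IPv6, UDP and finally VXLAN, with VOID items skipped
         * along the way. Each piece is copied into the corresponding
         * BNXT_ULP_ACT_PROP_IDX_ENCAP_* slot of the action properties.
         */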
998         if (!ulp_rte_item_skip_void(&item, 0))
999                 return BNXT_TF_RC_ERROR;
1000
1001         /* must have ethernet header */
1002         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1003                 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1004                 return BNXT_TF_RC_ERROR;
1005         }
1006         eth_spec = item->spec;
1007         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1008         ulp_encap_buffer_copy(buff,
1009                               eth_spec->dst.addr_bytes,
1010                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1011
1012         /* Goto the next item */
1013         if (!ulp_rte_item_skip_void(&item, 1))
1014                 return BNXT_TF_RC_ERROR;
1015
1016         /* May have vlan header */
1017         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1018                 vlan_num++;
1019                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1020                 ulp_encap_buffer_copy(buff,
1021                                       item->spec,
1022                                       sizeof(struct rte_flow_item_vlan));
1023
1024                 if (!ulp_rte_item_skip_void(&item, 1))
1025                         return BNXT_TF_RC_ERROR;
1026         }
1027
1028         /* may have two vlan headers */
1029         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1030                 vlan_num++;
1031                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1032                        sizeof(struct rte_flow_item_vlan)],
1033                        item->spec,
1034                        sizeof(struct rte_flow_item_vlan));
1035                 if (!ulp_rte_item_skip_void(&item, 1))
1036                         return BNXT_TF_RC_ERROR;
1037         }
1038         /* Update the vlan count and size if one or more vlans were found */
1039         if (vlan_num) {
1040                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1041                 vlan_num = tfp_cpu_to_be_32(vlan_num);
1042                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1043                        &vlan_num,
1044                        sizeof(uint32_t));
1045                 vlan_size = tfp_cpu_to_be_32(vlan_size);
1046                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1047                        &vlan_size,
1048                        sizeof(uint32_t));
1049         }
1050
1051         /* L3 must be IPv4, IPv6 */
1052         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1053                 ipv4_spec = item->spec;
1054                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1055
1056                 /* copy the ipv4 details */
1057                 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1058                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1059                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1060                         ulp_encap_buffer_copy(buff,
1061                                               def_ipv4_hdr,
1062                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1063                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1064                 } else {
1065                         const uint8_t *tmp_buff;
1066
1067                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1068                         ulp_encap_buffer_copy(buff,
1069                                               &ipv4_spec->hdr.version_ihl,
1070                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1071                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1072                              BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1073                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1074                         ulp_encap_buffer_copy(buff,
1075                                               tmp_buff,
1076                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1077                 }
1078                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1079                     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1080                     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1081                 ulp_encap_buffer_copy(buff,
1082                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1083                                       BNXT_ULP_ENCAP_IPV4_DEST_IP);
1084
1085                 /* Update the ip size details */
1086                 ip_size = tfp_cpu_to_be_32(ip_size);
1087                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1088                        &ip_size, sizeof(uint32_t));
1089
1090                 /* update the ip type */
1091                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1092                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1093                        &ip_type, sizeof(uint32_t));
1094
1095                 if (!ulp_rte_item_skip_void(&item, 1))
1096                         return BNXT_TF_RC_ERROR;
1097         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1098                 ipv6_spec = item->spec;
1099                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1100
1101                 /* copy the ipv6 details */
1102                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1103                        ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1104
1105                 /* Update the ip size details */
1106                 ip_size = tfp_cpu_to_be_32(ip_size);
1107                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1108                        &ip_size, sizeof(uint32_t));
1109
1110                 /* update the ip type */
1111                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1112                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1113                        &ip_type, sizeof(uint32_t));
1114
1115                 if (!ulp_rte_item_skip_void(&item, 1))
1116                         return BNXT_TF_RC_ERROR;
1117         } else {
1118                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1119                 return BNXT_TF_RC_ERROR;
1120         }
1121
1122         /* L4 is UDP */
1123         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1124                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1125                 return BNXT_TF_RC_ERROR;
1126         }
1127         /* copy the udp details */
1128         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1129                               item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1130
1131         if (!ulp_rte_item_skip_void(&item, 1))
1132                 return BNXT_TF_RC_ERROR;
1133
1134         /* Finally VXLAN */
1135         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1136                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1137                 return BNXT_TF_RC_ERROR;
1138         }
1139         vxlan_size = sizeof(struct rte_flow_item_vxlan);
1140         /* copy the vxlan details */
1141         memcpy(&vxlan_spec, item->spec, vxlan_size);
1142         vxlan_spec.flags = 0x08;
1143         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1144                               (const uint8_t *)&vxlan_spec,
1145                               vxlan_size);
1146         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1147         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1148                &vxlan_size, sizeof(uint32_t));
1149
1150         /* Update the act_bitmap with vxlan encap */
1151         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1152         return BNXT_TF_RC_SUCCESS;
1153 }
1154
1155 /* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
1156 int32_t
1157 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1158                                 __rte_unused,
1159                                 struct ulp_rte_parser_params *params)
1160 {
1161         /* Update the act_bitmap with vxlan decap */
1162         ULP_BITMAP_SET(params->act_bitmap.bits,
1163                        BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1164         return BNXT_TF_RC_SUCCESS;
1165 }
1166
1167 /* Function to handle the parsing of RTE Flow action drop Header. */
1168 int32_t
1169 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1170                          struct ulp_rte_parser_params *params)
1171 {
1172         /* Update the act_bitmap with drop */
1173         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1174         return BNXT_TF_RC_SUCCESS;
1175 }
1176
1177 /* Function to handle the parsing of RTE Flow action count. */
1178 int32_t
1179 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1180                           struct ulp_rte_parser_params *params)
1182 {
1183         const struct rte_flow_action_count *act_count;
1184         struct ulp_rte_act_prop *act_prop = &params->act_prop;
1185
1186         act_count = action_item->conf;
1187         if (act_count) {
1188                 if (act_count->shared) {
1189                         BNXT_TF_DBG(ERR,
1190                                     "Parse Error:Shared count not supported\n");
1191                         return BNXT_TF_RC_PARSE_ERR;
1192                 }
1193                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1194                        &act_count->id,
1195                        BNXT_ULP_ACT_PROP_SZ_COUNT);
1196         }
1197
1198         /* Update the act_bitmap with count */
1199         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1200         return BNXT_TF_RC_SUCCESS;
1201 }
1202
1203 /* Function to handle the parsing of RTE Flow action PF. */
1204 int32_t
1205 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1206                        struct ulp_rte_parser_params *params)
1207 {
1208         uint32_t svif;
1209
1210         /* Update the act_bitmap with the vnic bit */
1211         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1212
1213         /* copy the PF of the current device into VNIC Property */
1214         svif = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1215         svif = bnxt_get_vnic_id(svif);
1216         svif = rte_cpu_to_be_32(svif);
1217         memcpy(&params->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1218                &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1219
1220         return BNXT_TF_RC_SUCCESS;
1221 }
1222
1223 /* Function to handle the parsing of RTE Flow action VF. */
1224 int32_t
1225 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1226                        struct ulp_rte_parser_params *param)
1227 {
1228         const struct rte_flow_action_vf *vf_action;
1229         uint32_t pid;
1230
1231         vf_action = action_item->conf;
1232         if (vf_action) {
1233                 if (vf_action->original) {
1234                         BNXT_TF_DBG(ERR,
1235                                     "Parse Error:VF Original not supported\n");
1236                         return BNXT_TF_RC_PARSE_ERR;
1237                 }
1238                 /* TBD: Update the computed VNIC using VF conversion */
1239                 pid = bnxt_get_vnic_id(vf_action->id);
1240                 pid = rte_cpu_to_be_32(pid);
1241                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1242                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1243         }
1244
1245         /* Update the act_bitmap with vnic */
1246         ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1247         return BNXT_TF_RC_SUCCESS;
1248 }
1249
1250 /* Function to handle the parsing of RTE Flow action port_id. */
1251 int32_t
1252 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1253                             struct ulp_rte_parser_params *param)
1254 {
1255         const struct rte_flow_action_port_id *port_id;
1256         uint32_t pid;
1257
1258         port_id = act_item->conf;
1259         if (port_id) {
1260                 if (port_id->original) {
1261                         BNXT_TF_DBG(ERR,
1262                                     "ParseErr:Portid Original not supported\n");
1263                         return BNXT_TF_RC_PARSE_ERR;
1264                 }
1265                 /* TBD: Update the computed VNIC using port conversion */
1266                 pid = bnxt_get_vnic_id(port_id->id);
1267                 pid = rte_cpu_to_be_32(pid);
1268                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1269                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1270         }
1271
1272         /* Update the act_bitmap with vnic */
1273         ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1274         return BNXT_TF_RC_SUCCESS;
1275 }
1276
1277 /* Function to handle the parsing of RTE Flow action phy_port. */
1278 int32_t
1279 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1280                              struct ulp_rte_parser_params *prm)
1281 {
1282         const struct rte_flow_action_phy_port *phy_port;
1283         uint32_t pid;
1284
1285         phy_port = action_item->conf;
1286         if (phy_port) {
1287                 if (phy_port->original) {
1288                         BNXT_TF_DBG(ERR,
1289                                     "Parse Err:Port Original not supported\n");
1290                         return BNXT_TF_RC_PARSE_ERR;
1291                 }
1292                 pid = bnxt_get_vnic_id(phy_port->index);
1293                 pid = rte_cpu_to_be_32(pid);
1294                 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1295                        &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1296         }
1297
1298         /* Update the act_bitmap with vport */
1299         ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1300         return BNXT_TF_RC_SUCCESS;
1301 }