net/bnxt: use hashing for flow template match
[dpdk.git] / drivers / net / bnxt / tf_ulp / ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
12 #include "tfp.h"
13
14 /* Utility function to skip the void items. */
15 static inline int32_t
16 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
17 {
18         if (!*item)
19                 return 0;
20         if (increment)
21                 (*item)++;
22         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
23                 (*item)++;
24         if (*item)
25                 return 1;
26         return 0;
27 }
28
29 /* Utility function to update the field_bitmap */
30 static void
31 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
32                                    uint32_t idx)
33 {
34         struct ulp_rte_hdr_field *field;
35
36         field = &params->hdr_field[idx];
37         if (ulp_bitmap_notzero(field->mask, field->size)) {
38                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
39                 /* Not exact match */
40                 if (!ulp_bitmap_is_ones(field->mask, field->size))
41                         ULP_BITMAP_SET(params->fld_bitmap.bits,
42                                        BNXT_ULP_MATCH_TYPE_BITMASK_WM);
43         } else {
44                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
45         }
46 }
47
48 /* Utility function to copy field spec items */
49 static struct ulp_rte_hdr_field *
50 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
51                         const void *buffer,
52                         uint32_t size)
53 {
54         field->size = size;
55         memcpy(field->spec, buffer, field->size);
56         field++;
57         return field;
58 }
59
60 /* Utility function to copy field masks items */
61 static void
62 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
63                        uint32_t *idx,
64                        const void *buffer,
65                        uint32_t size)
66 {
67         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
68
69         memcpy(field->mask, buffer, size);
70         ulp_rte_parser_field_bitmap_update(params, *idx);
71         *idx = *idx + 1;
72 }
73
74 /*
75  * Function to handle the parsing of RTE Flows and placing
76  * the RTE flow items into the ulp structures.
77  */
78 int32_t
79 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
80                               struct ulp_rte_parser_params *params)
81 {
82         const struct rte_flow_item *item = pattern;
83         struct bnxt_ulp_rte_hdr_info *hdr_info;
84
85         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
86         if (params->dir == ULP_DIR_EGRESS)
87                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
88                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
89
90         /* Parse all the items in the pattern */
91         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
92                 /* get the header information from the flow_hdr_info table */
93                 hdr_info = &ulp_hdr_info[item->type];
94                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
95                         BNXT_TF_DBG(ERR,
96                                     "Truflow parser does not support type %d\n",
97                                     item->type);
98                         return BNXT_TF_RC_PARSE_ERR;
99                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
100                         /* call the registered callback handler */
101                         if (hdr_info->proto_hdr_func) {
102                                 if (hdr_info->proto_hdr_func(item, params) !=
103                                     BNXT_TF_RC_SUCCESS) {
104                                         return BNXT_TF_RC_ERROR;
105                                 }
106                         }
107                 }
108                 item++;
109         }
110         /* update the implied SVIF */
111         (void)ulp_rte_parser_svif_process(params);
112         return BNXT_TF_RC_SUCCESS;
113 }
114
115 /*
116  * Function to handle the parsing of RTE Flows and placing
117  * the RTE flow actions into the ulp structures.
118  */
119 int32_t
120 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
121                               struct ulp_rte_parser_params *params)
122 {
123         const struct rte_flow_action *action_item = actions;
124         struct bnxt_ulp_rte_act_info *hdr_info;
125
126         /* Parse all the items in the pattern */
127         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
128                 /* get the header information from the flow_hdr_info table */
129                 hdr_info = &ulp_act_info[action_item->type];
130                 if (hdr_info->act_type ==
131                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
132                         BNXT_TF_DBG(ERR,
133                                     "Truflow parser does not support act %u\n",
134                                     action_item->type);
135                         return BNXT_TF_RC_ERROR;
136                 } else if (hdr_info->act_type ==
137                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
138                         /* call the registered callback handler */
139                         if (hdr_info->proto_act_func) {
140                                 if (hdr_info->proto_act_func(action_item,
141                                                              params) !=
142                                     BNXT_TF_RC_SUCCESS) {
143                                         return BNXT_TF_RC_ERROR;
144                                 }
145                         }
146                 }
147                 action_item++;
148         }
149         /* update the implied VNIC */
150         ulp_rte_parser_vnic_process(params);
151         return BNXT_TF_RC_SUCCESS;
152 }
153
154 /* Function to handle the parsing of RTE Flow item PF Header. */
155 static int32_t
156 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
157                         enum rte_flow_item_type proto,
158                         uint16_t svif,
159                         uint16_t mask)
160 {
161         uint16_t port_id = svif;
162         uint32_t dir = 0;
163         struct ulp_rte_hdr_field *hdr_field;
164
165         if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF)) {
166                 BNXT_TF_DBG(ERR,
167                             "SVIF already set,multiple source not support'd\n");
168                 return BNXT_TF_RC_ERROR;
169         }
170
171         /*update the hdr_bitmap with BNXT_ULP_HDR_PROTO_SVIF */
172         ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF);
173
174         if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
175                 dir = ULP_UTIL_CHF_IDX_RD(params,
176                                           BNXT_ULP_CHF_IDX_DIRECTION);
177                 /* perform the conversion from dpdk port to bnxt svif */
178                 if (dir == ULP_DIR_EGRESS)
179                         svif = bnxt_get_svif(port_id, true);
180                 else
181                         svif = bnxt_get_svif(port_id, false);
182         }
183         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
184         memcpy(hdr_field->spec, &svif, sizeof(svif));
185         memcpy(hdr_field->mask, &mask, sizeof(mask));
186         hdr_field->size = sizeof(svif);
187         return BNXT_TF_RC_SUCCESS;
188 }
189
190 /* Function to handle the parsing of the RTE port id */
191 int32_t
192 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
193 {
194         uint16_t port_id = 0;
195         uint16_t svif_mask = 0xFFFF;
196
197         if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_SVIF))
198                 return BNXT_TF_RC_SUCCESS;
199
200         /* SVIF not set. So get the port id */
201         port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);
202
203         /* Update the SVIF details */
204         return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
205                                        port_id, svif_mask);
206 }
207
208 /* Function to handle the implicit VNIC RTE port id */
209 int32_t
210 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
211 {
212         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
213
214         if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
215             ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
216                 return BNXT_TF_RC_SUCCESS;
217
218         /* Update the vnic details */
219         ulp_rte_pf_act_handler(NULL, params);
220         return BNXT_TF_RC_SUCCESS;
221 }
222
223 /* Function to handle the parsing of RTE Flow item PF Header. */
224 int32_t
225 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
226                        struct ulp_rte_parser_params *params)
227 {
228         uint16_t port_id = 0;
229         uint16_t svif_mask = 0xFFFF;
230
231         /* Get the port id */
232         port_id = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);
233
234         /* Update the SVIF details */
235         return ulp_rte_parser_svif_set(params,
236                                        item->type,
237                                        port_id, svif_mask);
238 }
239
240 /* Function to handle the parsing of RTE Flow item VF Header. */
241 int32_t
242 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
243                        struct ulp_rte_parser_params *params)
244 {
245         const struct rte_flow_item_vf *vf_spec = item->spec;
246         const struct rte_flow_item_vf *vf_mask = item->mask;
247         uint16_t svif = 0, mask = 0;
248
249         /* Get VF rte_flow_item for Port details */
250         if (vf_spec)
251                 svif = (uint16_t)vf_spec->id;
252         if (vf_mask)
253                 mask = (uint16_t)vf_mask->id;
254
255         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
256 }
257
258 /* Function to handle the parsing of RTE Flow item port id  Header. */
259 int32_t
260 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
261                             struct ulp_rte_parser_params *params)
262 {
263         const struct rte_flow_item_port_id *port_spec = item->spec;
264         const struct rte_flow_item_port_id *port_mask = item->mask;
265         uint16_t svif = 0, mask = 0;
266
267         /*
268          * Copy the rte_flow_item for Port into hdr_field using port id
269          * header fields.
270          */
271         if (port_spec)
272                 svif = (uint16_t)port_spec->id;
273         if (port_mask)
274                 mask = (uint16_t)port_mask->id;
275
276         /* Update the SVIF details */
277         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
278 }
279
280 /* Function to handle the parsing of RTE Flow item phy port Header. */
281 int32_t
282 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
283                              struct ulp_rte_parser_params *params)
284 {
285         const struct rte_flow_item_phy_port *port_spec = item->spec;
286         const struct rte_flow_item_phy_port *port_mask = item->mask;
287         uint32_t svif = 0, mask = 0;
288
289         /* Copy the rte_flow_item for phy port into hdr_field */
290         if (port_spec)
291                 svif = port_spec->index;
292         if (port_mask)
293                 mask = port_mask->index;
294
295         /* Update the SVIF details */
296         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
297 }
298
/*
 * Function to handle the parsing of RTE Flow item Ethernet Header.
 * Copies dst MAC, src MAC and ether type (in that order) into hdr_field,
 * reserves slots for possible VLAN tags, and sets the O_ETH/I_ETH bits.
 */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
                        struct ulp_rte_parser_params *params)
{
        const struct rte_flow_item_eth *eth_spec = item->spec;
        const struct rte_flow_item_eth *eth_mask = item->mask;
        struct ulp_rte_hdr_field *field;
        uint32_t idx = params->field_idx;
        uint64_t set_flag = 0;
        uint32_t size;

        /*
         * Copy the rte_flow_item for eth into hdr_field using ethernet
         * header fields
         */
        if (eth_spec) {
                size = sizeof(eth_spec->dst.addr_bytes);
                field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
                                                eth_spec->dst.addr_bytes,
                                                size);
                size = sizeof(eth_spec->src.addr_bytes);
                field = ulp_rte_parser_fld_copy(field,
                                                eth_spec->src.addr_bytes,
                                                size);
                field = ulp_rte_parser_fld_copy(field,
                                                &eth_spec->type,
                                                sizeof(eth_spec->type));
        }
        /* Masks must be copied in the same field order as the specs. */
        if (eth_mask) {
                ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
                                       sizeof(eth_mask->dst.addr_bytes));
                ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
                                       sizeof(eth_mask->src.addr_bytes));
                ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
                                       sizeof(eth_mask->type));
        }
        /*
         * Add number of vlan header elements: VLAN fields (if any) are
         * written at vlan_idx, between this header and the next one.
         */
        params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
        params->vlan_idx = params->field_idx;
        params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;

        /*
         * Update the hdr_bitmap with BNXT_ULP_HDR_PROTO_I_ETH: a second
         * ethernet header (O_ETH already seen) is the inner one.
         */
        set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
                                    BNXT_ULP_HDR_BIT_O_ETH);
        if (set_flag)
                ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
        else
                ULP_BITMAP_RESET(params->hdr_bitmap.bits,
                                 BNXT_ULP_HDR_BIT_I_ETH);

        /* update the hdr_bitmap with BNXT_ULP_HDR_PROTO_O_ETH */
        ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);

        return BNXT_TF_RC_SUCCESS;
}
355
/*
 * Function to handle the parsing of RTE Flow item Vlan Header.
 * Splits the TCI into priority and VID fields, copies them into the
 * vlan slots reserved by the eth handler, and classifies the tag as
 * outer-outer/outer-inner/inner-outer/inner-inner.
 */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_parser_params *params)
{
        const struct rte_flow_item_vlan *vlan_spec = item->spec;
        const struct rte_flow_item_vlan *vlan_mask = item->mask;
        struct ulp_rte_hdr_field *field;
        struct ulp_rte_hdr_bitmap       *hdr_bit;
        uint32_t idx = params->vlan_idx;
        uint16_t vlan_tag, priority;
        uint32_t outer_vtag_num;
        uint32_t inner_vtag_num;

        /*
         * Copy the rte_flow_item for vlan into hdr_field using Vlan
         * header fields
         */
        if (vlan_spec) {
                /* Split the 16-bit TCI: top 3 bits PCP, low 12 bits VID. */
                vlan_tag = ntohs(vlan_spec->tci);
                priority = htons(vlan_tag >> 13);
                /*
                 * NOTE(review): htons() on the 3-bit priority places it in
                 * the high byte of the 16-bit field -- confirm the template
                 * expects the PCP in this byte layout.
                 */
                vlan_tag &= 0xfff;
                vlan_tag = htons(vlan_tag);

                field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
                                                &priority,
                                                sizeof(priority));
                field = ulp_rte_parser_fld_copy(field,
                                                &vlan_tag,
                                                sizeof(vlan_tag));
                field = ulp_rte_parser_fld_copy(field,
                                                &vlan_spec->inner_type,
                                                sizeof(vlan_spec->inner_type));
        }

        if (vlan_mask) {
                vlan_tag = ntohs(vlan_mask->tci);
                priority = htons(vlan_tag >> 13);
                vlan_tag &= 0xfff;
                vlan_tag = htons(vlan_tag);

                field = &params->hdr_field[idx];
                /*
                 * NOTE(review): these copies use field->size, which is only
                 * set when a spec was copied above; for a mask-only item the
                 * sizes may be stale or zero -- confirm this is intended.
                 */
                memcpy(field->mask, &priority, field->size);
                field++;
                memcpy(field->mask, &vlan_tag, field->size);
                field++;
                memcpy(field->mask, &vlan_mask->inner_type, field->size);
        }
        /* Set the vlan index to new incremented value */
        params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;

        /* Get the outer tag and inner tag counts */
        outer_vtag_num = ULP_UTIL_CHF_IDX_RD(params,
                                             BNXT_ULP_CHF_IDX_O_VTAG_NUM);
        inner_vtag_num = ULP_UTIL_CHF_IDX_RD(params,
                                             BNXT_ULP_CHF_IDX_I_VTAG_NUM);

        /*
         * Update the hdr_bitmap of the vlans: the chain below assigns this
         * tag to the first free position in the fixed order
         * OO -> OI -> IO -> II, depending on which eth/vlan headers have
         * already been seen.
         */
        hdr_bit = &params->hdr_bitmap;
        if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
            !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) {
                /* Set the outer vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN);
                outer_vtag_num++;
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM,
                                    outer_vtag_num);
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_PRESENT, 1);
        } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) &&
                   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN)) {
                /* Set the outer vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN);
                outer_vtag_num++;
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_VTAG_NUM,
                                    outer_vtag_num);
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_TWO_VTAGS, 1);
        } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
                   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN)) {
                /* Set the inner vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN);
                inner_vtag_num++;
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM,
                                    inner_vtag_num);
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_PRESENT, 1);
        } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OO_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_OI_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_IO_VLAN) &&
                   !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_II_VLAN)) {
                /* Set the inner vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bit->bits, BNXT_ULP_HDR_BIT_II_VLAN);
                inner_vtag_num++;
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_VTAG_NUM,
                                    inner_vtag_num);
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_TWO_VTAGS, 1);
        } else {
                /* A vlan without a preceding eth header is malformed. */
                BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found withtout eth\n");
                return BNXT_TF_RC_ERROR;
        }
        return BNXT_TF_RC_SUCCESS;
}
461
/*
 * Function to handle the parsing of RTE Flow item IPV4 Header.
 * Copies each ipv4 header field (in wire order) into hdr_field and marks
 * the header as outer or inner L3; at most two L3 headers are supported.
 */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_parser_params *params)
{
        const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
        const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
        struct ulp_rte_hdr_field *field;
        struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
        uint32_t idx = params->field_idx;
        uint32_t size;
        uint32_t inner_l3, outer_l3;

        /* An inner L3 already parsed means this would be the third one. */
        inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3);
        if (inner_l3) {
                BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /*
         * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
         * header fields
         */
        if (ipv4_spec) {
                size = sizeof(ipv4_spec->hdr.version_ihl);
                field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
                                                &ipv4_spec->hdr.version_ihl,
                                                size);
                size = sizeof(ipv4_spec->hdr.type_of_service);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv4_spec->hdr.type_of_service,
                                                size);
                size = sizeof(ipv4_spec->hdr.total_length);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv4_spec->hdr.total_length,
                                                size);
                size = sizeof(ipv4_spec->hdr.packet_id);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv4_spec->hdr.packet_id,
                                                size);
                size = sizeof(ipv4_spec->hdr.fragment_offset);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv4_spec->hdr.fragment_offset,
                                                size);
                size = sizeof(ipv4_spec->hdr.time_to_live);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv4_spec->hdr.time_to_live,
                                                size);
                size = sizeof(ipv4_spec->hdr.next_proto_id);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv4_spec->hdr.next_proto_id,
                                                size);
                size = sizeof(ipv4_spec->hdr.hdr_checksum);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv4_spec->hdr.hdr_checksum,
                                                size);
                size = sizeof(ipv4_spec->hdr.src_addr);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv4_spec->hdr.src_addr,
                                                size);
                size = sizeof(ipv4_spec->hdr.dst_addr);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv4_spec->hdr.dst_addr,
                                                size);
        }
        /* Masks must follow the exact same field order as the specs. */
        if (ipv4_mask) {
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv4_mask->hdr.version_ihl,
                                       sizeof(ipv4_mask->hdr.version_ihl));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv4_mask->hdr.type_of_service,
                                       sizeof(ipv4_mask->hdr.type_of_service));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv4_mask->hdr.total_length,
                                       sizeof(ipv4_mask->hdr.total_length));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv4_mask->hdr.packet_id,
                                       sizeof(ipv4_mask->hdr.packet_id));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv4_mask->hdr.fragment_offset,
                                       sizeof(ipv4_mask->hdr.fragment_offset));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv4_mask->hdr.time_to_live,
                                       sizeof(ipv4_mask->hdr.time_to_live));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv4_mask->hdr.next_proto_id,
                                       sizeof(ipv4_mask->hdr.next_proto_id));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv4_mask->hdr.hdr_checksum,
                                       sizeof(ipv4_mask->hdr.hdr_checksum));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv4_mask->hdr.src_addr,
                                       sizeof(ipv4_mask->hdr.src_addr));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv4_mask->hdr.dst_addr,
                                       sizeof(ipv4_mask->hdr.dst_addr));
        }
        /* Add the number of ipv4 header elements */
        params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;

        /*
         * Set the ipv4 header bitmap and computed l3 header bitmaps:
         * if any outer L3 was already seen, this one is the inner header.
         */
        outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3);
        if (outer_l3 ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
                inner_l3++;
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, inner_l3);
        } else {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
                outer_l3++;
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, outer_l3);
        }
        return BNXT_TF_RC_SUCCESS;
}
577
/*
 * Function to handle the parsing of RTE Flow item IPV6 Header.
 * Copies each ipv6 header field (in wire order) into hdr_field and marks
 * the header as outer or inner L3; at most two L3 headers are supported.
 */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_parser_params *params)
{
        const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
        const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
        struct ulp_rte_hdr_field *field;
        struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
        uint32_t idx = params->field_idx;
        uint32_t size;
        uint32_t inner_l3, outer_l3;

        /* An inner L3 already parsed means this would be the third one. */
        inner_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L3);
        if (inner_l3) {
                BNXT_TF_DBG(ERR, "Parse Error: 3'rd L3 header not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /*
         * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
         * header fields
         */
        if (ipv6_spec) {
                size = sizeof(ipv6_spec->hdr.vtc_flow);
                field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
                                                &ipv6_spec->hdr.vtc_flow,
                                                size);
                size = sizeof(ipv6_spec->hdr.payload_len);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv6_spec->hdr.payload_len,
                                                size);
                size = sizeof(ipv6_spec->hdr.proto);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv6_spec->hdr.proto,
                                                size);
                size = sizeof(ipv6_spec->hdr.hop_limits);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv6_spec->hdr.hop_limits,
                                                size);
                size = sizeof(ipv6_spec->hdr.src_addr);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv6_spec->hdr.src_addr,
                                                size);
                size = sizeof(ipv6_spec->hdr.dst_addr);
                field = ulp_rte_parser_fld_copy(field,
                                                &ipv6_spec->hdr.dst_addr,
                                                size);
        }
        /* Masks must follow the exact same field order as the specs. */
        if (ipv6_mask) {
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv6_mask->hdr.vtc_flow,
                                       sizeof(ipv6_mask->hdr.vtc_flow));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv6_mask->hdr.payload_len,
                                       sizeof(ipv6_mask->hdr.payload_len));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv6_mask->hdr.proto,
                                       sizeof(ipv6_mask->hdr.proto));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv6_mask->hdr.hop_limits,
                                       sizeof(ipv6_mask->hdr.hop_limits));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv6_mask->hdr.src_addr,
                                       sizeof(ipv6_mask->hdr.src_addr));
                ulp_rte_prsr_mask_copy(params, &idx,
                                       &ipv6_mask->hdr.dst_addr,
                                       sizeof(ipv6_mask->hdr.dst_addr));
        }
        /* add number of ipv6 header elements */
        params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;

        /*
         * Set the ipv6 header bitmap and computed l3 header bitmaps.
         * Unlike the ipv4 handler this writes the constant 1 instead of
         * incrementing; equivalent here because both counters are 0 when
         * the corresponding branch is taken (inner_l3 was rejected above
         * if nonzero, and the else branch requires outer_l3 == 0).
         */
        outer_l3 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L3);
        if (outer_l3 ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L3, 1);
        } else {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
                ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L3, 1);
        }
        return BNXT_TF_RC_SUCCESS;
}
663
/* Function to handle the parsing of RTE Flow item UDP Header.
 * Copies the UDP spec/mask into params->hdr_field starting at
 * params->field_idx and marks the outer or inner UDP bit in the
 * header bitmap. Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_udp *udp_spec = item->spec;
	const struct rte_flow_item_udp *udp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t inner_l4, outer_l4;

	/* Only an outer and an inner L4 header are supported per flow;
	 * a third L4 header is rejected.
	 */
	inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4);
	if (inner_l4) {
		BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for UDP into hdr_field using UDP
	 * header fields: src port, dst port, datagram length, checksum.
	 */
	if (udp_spec) {
		size = sizeof(udp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&udp_spec->hdr.src_port,
						size);
		size = sizeof(udp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dst_port,
						size);
		size = sizeof(udp_spec->hdr.dgram_len);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_len,
						size);
		size = sizeof(udp_spec->hdr.dgram_cksum);
		field = ulp_rte_parser_fld_copy(field,
						&udp_spec->hdr.dgram_cksum,
						size);
	}
	/* Masks are recorded in the same field order; the helper advances
	 * idx for each copied mask.
	 */
	if (udp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.src_port,
				       sizeof(udp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dst_port,
				       sizeof(udp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_len,
				       sizeof(udp_mask->hdr.dgram_len));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &udp_mask->hdr.dgram_cksum,
				       sizeof(udp_mask->hdr.dgram_cksum));
	}

	/* Add number of UDP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;

	/* Set the udp header bitmap and computed l4 header bitmaps:
	 * if an outer L4 (UDP or TCP) is already present this UDP header
	 * is the inner one, otherwise it is the outer one.
	 */
	outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4);
	if (outer_l4 ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L4, 1);
	}
	return BNXT_TF_RC_SUCCESS;
}
736
/* Function to handle the parsing of RTE Flow item TCP Header.
 * Copies the TCP spec/mask into params->hdr_field starting at
 * params->field_idx and marks the outer or inner TCP bit in the
 * header bitmap. Returns BNXT_TF_RC_SUCCESS or BNXT_TF_RC_ERROR.
 */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
			struct ulp_rte_parser_params *params)
{
	const struct rte_flow_item_tcp *tcp_spec = item->spec;
	const struct rte_flow_item_tcp *tcp_mask = item->mask;
	struct ulp_rte_hdr_field *field;
	struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
	uint32_t idx = params->field_idx;
	uint32_t size;
	uint32_t inner_l4, outer_l4;

	/* Only an outer and an inner L4 header are supported per flow;
	 * a third L4 header is rejected.
	 */
	inner_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_I_L4);
	if (inner_l4) {
		BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
		return BNXT_TF_RC_ERROR;
	}

	/*
	 * Copy the rte_flow_item for TCP into hdr_field using TCP
	 * header fields: ports, seq/ack, data offset, flags, window,
	 * checksum and urgent pointer.
	 */
	if (tcp_spec) {
		size = sizeof(tcp_spec->hdr.src_port);
		field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
						&tcp_spec->hdr.src_port,
						size);
		size = sizeof(tcp_spec->hdr.dst_port);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.dst_port,
						size);
		size = sizeof(tcp_spec->hdr.sent_seq);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.sent_seq,
						size);
		size = sizeof(tcp_spec->hdr.recv_ack);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.recv_ack,
						size);
		size = sizeof(tcp_spec->hdr.data_off);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.data_off,
						size);
		size = sizeof(tcp_spec->hdr.tcp_flags);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_flags,
						size);
		size = sizeof(tcp_spec->hdr.rx_win);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.rx_win,
						size);
		size = sizeof(tcp_spec->hdr.cksum);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.cksum,
						size);
		size = sizeof(tcp_spec->hdr.tcp_urp);
		field = ulp_rte_parser_fld_copy(field,
						&tcp_spec->hdr.tcp_urp,
						size);
	} else {
		/* NOTE(review): when no spec is given, idx is advanced past
		 * all TCP field slots before the mask copy below, whereas the
		 * UDP/IPv6 handlers copy masks from the original field_idx.
		 * Looks inconsistent — confirm intended mask placement.
		 */
		idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
	}

	if (tcp_mask) {
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.src_port,
				       sizeof(tcp_mask->hdr.src_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.dst_port,
				       sizeof(tcp_mask->hdr.dst_port));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.sent_seq,
				       sizeof(tcp_mask->hdr.sent_seq));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.recv_ack,
				       sizeof(tcp_mask->hdr.recv_ack));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.data_off,
				       sizeof(tcp_mask->hdr.data_off));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_flags,
				       sizeof(tcp_mask->hdr.tcp_flags));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.rx_win,
				       sizeof(tcp_mask->hdr.rx_win));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.cksum,
				       sizeof(tcp_mask->hdr.cksum));
		ulp_rte_prsr_mask_copy(params, &idx,
				       &tcp_mask->hdr.tcp_urp,
				       sizeof(tcp_mask->hdr.tcp_urp));
	}
	/* add number of TCP header elements */
	params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;

	/* Set the tcp header bitmap and computed l4 header bitmaps:
	 * if an outer L4 (UDP or TCP) is already present this TCP header
	 * is the inner one, otherwise it is the outer one.
	 */
	outer_l4 = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_O_L4);
	if (outer_l4 ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
	    ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_I_L4, 1);
	} else {
		ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
		ULP_UTIL_CHF_IDX_WR(params, BNXT_ULP_CHF_IDX_O_L4, 1);
	}
	return BNXT_TF_RC_SUCCESS;
}
846
847 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
848 int32_t
849 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
850                           struct ulp_rte_parser_params *params)
851 {
852         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
853         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
854         struct ulp_rte_hdr_field *field;
855         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
856         uint32_t idx = params->field_idx;
857         uint32_t size;
858
859         /*
860          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
861          * header fields
862          */
863         if (vxlan_spec) {
864                 size = sizeof(vxlan_spec->flags);
865                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
866                                                 &vxlan_spec->flags,
867                                                 size);
868                 size = sizeof(vxlan_spec->rsvd0);
869                 field = ulp_rte_parser_fld_copy(field,
870                                                 &vxlan_spec->rsvd0,
871                                                 size);
872                 size = sizeof(vxlan_spec->vni);
873                 field = ulp_rte_parser_fld_copy(field,
874                                                 &vxlan_spec->vni,
875                                                 size);
876                 size = sizeof(vxlan_spec->rsvd1);
877                 field = ulp_rte_parser_fld_copy(field,
878                                                 &vxlan_spec->rsvd1,
879                                                 size);
880         }
881         if (vxlan_mask) {
882                 ulp_rte_prsr_mask_copy(params, &idx,
883                                        &vxlan_mask->flags,
884                                        sizeof(vxlan_mask->flags));
885                 ulp_rte_prsr_mask_copy(params, &idx,
886                                        &vxlan_mask->rsvd0,
887                                        sizeof(vxlan_mask->rsvd0));
888                 ulp_rte_prsr_mask_copy(params, &idx,
889                                        &vxlan_mask->vni,
890                                        sizeof(vxlan_mask->vni));
891                 ulp_rte_prsr_mask_copy(params, &idx,
892                                        &vxlan_mask->rsvd1,
893                                        sizeof(vxlan_mask->rsvd1));
894         }
895         /* Add number of vxlan header elements */
896         params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
897
898         /* Update the hdr_bitmap with vxlan */
899         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
900         return BNXT_TF_RC_SUCCESS;
901 }
902
/* Function to handle the parsing of RTE Flow item void Header.
 * Void items carry no match data, so this is deliberately a no-op.
 */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
910
/* Function to handle the parsing of RTE Flow action void Header.
 * Void actions carry no configuration, so this is deliberately a no-op.
 */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params __rte_unused)
{
	return BNXT_TF_RC_SUCCESS;
}
918
919 /* Function to handle the parsing of RTE Flow action Mark Header. */
920 int32_t
921 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
922                          struct ulp_rte_parser_params *param)
923 {
924         const struct rte_flow_action_mark *mark;
925         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
926         uint32_t mark_id;
927
928         mark = action_item->conf;
929         if (mark) {
930                 mark_id = tfp_cpu_to_be_32(mark->id);
931                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
932                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
933
934                 /* Update the hdr_bitmap with vxlan */
935                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
936                 return BNXT_TF_RC_SUCCESS;
937         }
938         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
939         return BNXT_TF_RC_ERROR;
940 }
941
942 /* Function to handle the parsing of RTE Flow action RSS Header. */
943 int32_t
944 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
945                         struct ulp_rte_parser_params *param)
946 {
947         const struct rte_flow_action_rss *rss = action_item->conf;
948
949         if (rss) {
950                 /* Update the hdr_bitmap with vxlan */
951                 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
952                 return BNXT_TF_RC_SUCCESS;
953         }
954         BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
955         return BNXT_TF_RC_ERROR;
956 }
957
/* Function to handle the parsing of RTE Flow action vxlan_encap Header.
 * Walks the encap definition item list (ETH [VLAN [VLAN]] IPV4|IPV6 UDP
 * VXLAN) and fills the corresponding encap action properties. Returns
 * BNXT_TF_RC_ERROR on any missing or out-of-order item.
 */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
				struct ulp_rte_parser_params *params)
{
	const struct rte_flow_action_vxlan_encap *vxlan_encap;
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	struct rte_flow_item_vxlan vxlan_spec;
	uint32_t vlan_num = 0, vlan_size = 0;
	uint32_t ip_size = 0, ip_type = 0;
	uint32_t vxlan_size = 0;
	uint8_t *buff;
	/* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
	const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
				    0x00, 0x40, 0x11};
	struct ulp_rte_act_bitmap *act = &params->act_bitmap;
	struct ulp_rte_act_prop *ap = &params->act_prop;

	vxlan_encap = action_item->conf;
	if (!vxlan_encap) {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	item = vxlan_encap->definition;
	if (!item) {
		BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
		return BNXT_TF_RC_ERROR;
	}

	if (!ulp_rte_item_skip_void(&item, 0))
		return BNXT_TF_RC_ERROR;

	/* must have ethernet header */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
		return BNXT_TF_RC_ERROR;
	}
	/* NOTE(review): eth_spec is not NULL-checked before the dereference
	 * below — presumably the encap template always carries a spec;
	 * confirm against callers.
	 */
	eth_spec = item->spec;
	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
	ulp_encap_buffer_copy(buff,
			      eth_spec->dst.addr_bytes,
			      BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);

	/* Goto the next item */
	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* May have vlan header */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
		ulp_encap_buffer_copy(buff,
				      item->spec,
				      sizeof(struct rte_flow_item_vlan));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}

	/* may have two vlan headers; the second tag lands right after
	 * the first in the VTAG property buffer.
	 */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_num++;
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
		       sizeof(struct rte_flow_item_vlan)],
		       item->spec,
		       sizeof(struct rte_flow_item_vlan));
		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	}
	/* Update the vlan count and size of more than one */
	if (vlan_num) {
		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
		vlan_num = tfp_cpu_to_be_32(vlan_num);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
		       &vlan_num,
		       sizeof(uint32_t));
		vlan_size = tfp_cpu_to_be_32(vlan_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
		       &vlan_size,
		       sizeof(uint32_t));
	}

	/* L3 must be IPv4, IPv6 */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		ipv4_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

		/* copy the ipv4 details: if the leading ver/hlen..TOS bytes
		 * are all zero, fall back to the default IPv4 header bytes.
		 */
		if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
					BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      def_ipv4_hdr,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
		} else {
			const uint8_t *tmp_buff;

			/* Copy ver/hlen..TOS, then the id..proto run, into
			 * consecutive regions of the encap IP property.
			 */
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
			ulp_encap_buffer_copy(buff,
					      &ipv4_spec->hdr.version_ihl,
					      BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
			buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
			     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
			tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
			ulp_encap_buffer_copy(buff,
					      tmp_buff,
					      BNXT_ULP_ENCAP_IPV4_ID_PROTO);
		}
		/* Destination IP goes after the ver/hlen and id/proto runs. */
		buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
		    BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
		    BNXT_ULP_ENCAP_IPV4_ID_PROTO];
		ulp_encap_buffer_copy(buff,
				      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
				      BNXT_ULP_ENCAP_IPV4_DEST_IP);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		/* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		ipv6_spec = item->spec;
		ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

		/* copy the ipv6 details */
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
		       ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);

		/* Update the ip size details */
		ip_size = tfp_cpu_to_be_32(ip_size);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
		       &ip_size, sizeof(uint32_t));

		 /* update the ip type */
		ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
		       &ip_type, sizeof(uint32_t));

		if (!ulp_rte_item_skip_void(&item, 1))
			return BNXT_TF_RC_ERROR;
	} else {
		BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
		return BNXT_TF_RC_ERROR;
	}

	/* L4 is UDP */
	if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
		return BNXT_TF_RC_ERROR;
	}
	/* copy the udp details */
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
			      item->spec, BNXT_ULP_ENCAP_UDP_SIZE);

	if (!ulp_rte_item_skip_void(&item, 1))
		return BNXT_TF_RC_ERROR;

	/* Finally VXLAN */
	if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
		return BNXT_TF_RC_ERROR;
	}
	vxlan_size = sizeof(struct rte_flow_item_vxlan);
	/* copy the vxlan details; force the VNI-valid flag (0x08) per the
	 * VXLAN header definition regardless of the supplied spec.
	 */
	memcpy(&vxlan_spec, item->spec, vxlan_size);
	vxlan_spec.flags = 0x08;
	ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
			      (const uint8_t *)&vxlan_spec,
			      vxlan_size);
	vxlan_size = tfp_cpu_to_be_32(vxlan_size);
	memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
	       &vxlan_size, sizeof(uint32_t));

	/* flag the vxlan encap action in the action bitmap */
	ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
	return BNXT_TF_RC_SUCCESS;
}
1147
/* Function to handle the parsing of RTE Flow action vxlan_decap Header.
 * No configuration is needed; only the decap bit is recorded.
 */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
				__rte_unused,
				struct ulp_rte_parser_params *params)
{
	/* Flag the vxlan decap action in the action bitmap. */
	ULP_BITMAP_SET(params->act_bitmap.bits,
		       BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
	return BNXT_TF_RC_SUCCESS;
}
1159
/* Function to handle the parsing of RTE Flow action drop Header.
 * No configuration is needed; only the drop bit is recorded.
 */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
			 struct ulp_rte_parser_params *params)
{
	/* Flag the drop action in the action bitmap. */
	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
	return BNXT_TF_RC_SUCCESS;
}
1169
1170 /* Function to handle the parsing of RTE Flow action count. */
1171 int32_t
1172 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1173                           struct ulp_rte_parser_params *params)
1174
1175 {
1176         const struct rte_flow_action_count *act_count;
1177         struct ulp_rte_act_prop *act_prop = &params->act_prop;
1178
1179         act_count = action_item->conf;
1180         if (act_count) {
1181                 if (act_count->shared) {
1182                         BNXT_TF_DBG(ERR,
1183                                     "Parse Error:Shared count not supported\n");
1184                         return BNXT_TF_RC_PARSE_ERR;
1185                 }
1186                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1187                        &act_count->id,
1188                        BNXT_ULP_ACT_PROP_SZ_COUNT);
1189         }
1190
1191         /* Update the hdr_bitmap with count */
1192         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1193         return BNXT_TF_RC_SUCCESS;
1194 }
1195
1196 /* Function to handle the parsing of RTE Flow action PF. */
1197 int32_t
1198 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1199                        struct ulp_rte_parser_params *params)
1200 {
1201         uint32_t svif;
1202
1203         /* Update the hdr_bitmap with vnic bit */
1204         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1205
1206         /* copy the PF of the current device into VNIC Property */
1207         svif = ULP_UTIL_CHF_IDX_RD(params, BNXT_ULP_CHF_IDX_INCOMING_IF);
1208         svif = bnxt_get_vnic_id(svif);
1209         svif = rte_cpu_to_be_32(svif);
1210         memcpy(&params->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1211                &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1212
1213         return BNXT_TF_RC_SUCCESS;
1214 }
1215
1216 /* Function to handle the parsing of RTE Flow action VF. */
1217 int32_t
1218 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1219                        struct ulp_rte_parser_params *param)
1220 {
1221         const struct rte_flow_action_vf *vf_action;
1222         uint32_t pid;
1223
1224         vf_action = action_item->conf;
1225         if (vf_action) {
1226                 if (vf_action->original) {
1227                         BNXT_TF_DBG(ERR,
1228                                     "Parse Error:VF Original not supported\n");
1229                         return BNXT_TF_RC_PARSE_ERR;
1230                 }
1231                 /* TBD: Update the computed VNIC using VF conversion */
1232                 pid = bnxt_get_vnic_id(vf_action->id);
1233                 pid = rte_cpu_to_be_32(pid);
1234                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1235                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1236         }
1237
1238         /* Update the hdr_bitmap with count */
1239         ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1240         return BNXT_TF_RC_SUCCESS;
1241 }
1242
1243 /* Function to handle the parsing of RTE Flow action port_id. */
1244 int32_t
1245 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1246                             struct ulp_rte_parser_params *param)
1247 {
1248         const struct rte_flow_action_port_id *port_id;
1249         uint32_t pid;
1250
1251         port_id = act_item->conf;
1252         if (port_id) {
1253                 if (port_id->original) {
1254                         BNXT_TF_DBG(ERR,
1255                                     "ParseErr:Portid Original not supported\n");
1256                         return BNXT_TF_RC_PARSE_ERR;
1257                 }
1258                 /* TBD: Update the computed VNIC using port conversion */
1259                 pid = bnxt_get_vnic_id(port_id->id);
1260                 pid = rte_cpu_to_be_32(pid);
1261                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1262                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1263         }
1264
1265         /* Update the hdr_bitmap with count */
1266         ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1267         return BNXT_TF_RC_SUCCESS;
1268 }
1269
1270 /* Function to handle the parsing of RTE Flow action phy_port. */
1271 int32_t
1272 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1273                              struct ulp_rte_parser_params *prm)
1274 {
1275         const struct rte_flow_action_phy_port *phy_port;
1276         uint32_t pid;
1277
1278         phy_port = action_item->conf;
1279         if (phy_port) {
1280                 if (phy_port->original) {
1281                         BNXT_TF_DBG(ERR,
1282                                     "Parse Err:Port Original not supported\n");
1283                         return BNXT_TF_RC_PARSE_ERR;
1284                 }
1285                 pid = bnxt_get_vnic_id(phy_port->index);
1286                 pid = rte_cpu_to_be_32(pid);
1287                 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1288                        &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1289         }
1290
1291         /* Update the hdr_bitmap with count */
1292         ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1293         return BNXT_TF_RC_SUCCESS;
1294 }