1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
12 #include "tfp.h"
13 #include "ulp_port_db.h"
14
15 /* Utility function to skip the void items. */
16 static inline int32_t
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
18 {
19         if (!*item)
20                 return 0;
21         if (increment)
22                 (*item)++;
23         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
24                 (*item)++;
25         if (*item)
26                 return 1;
27         return 0;
28 }
29
30 /* Utility function to update the field_bitmap */
31 static void
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
33                                    uint32_t idx)
34 {
35         struct ulp_rte_hdr_field *field;
36
37         field = &params->hdr_field[idx];
38         if (ulp_bitmap_notzero(field->mask, field->size)) {
39                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
40                 /* Not exact match */
41                 if (!ulp_bitmap_is_ones(field->mask, field->size))
42                         ULP_BITMAP_SET(params->fld_bitmap.bits,
43                                        BNXT_ULP_MATCH_TYPE_BITMASK_WM);
44         } else {
45                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
46         }
47 }
48
49 /* Utility function to copy field spec items */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
52                         const void *buffer,
53                         uint32_t size)
54 {
55         field->size = size;
56         memcpy(field->spec, buffer, field->size);
57         field++;
58         return field;
59 }
60
61 /* Utility function to copy field mask items */
62 static void
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
64                        uint32_t *idx,
65                        const void *buffer,
66                        uint32_t size)
67 {
68         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
69
70         memcpy(field->mask, buffer, size);
71         ulp_rte_parser_field_bitmap_update(params, *idx);
72         *idx = *idx + 1;
73 }
74
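/*
 * Usage sketch (editor's note, illustrative only): the two parser entry
 * points below are driven from the rte_flow create/validate path of the
 * driver (outside this file, e.g. bnxt_ulp_flow.c).  A caller zeroes a
 * struct ulp_rte_parser_params, fills in the ulp context and direction and
 * then runs the header and action parsers back to back:
 *
 *	struct ulp_rte_parser_params params;
 *
 *	memset(&params, 0, sizeof(params));
 *	params.ulp_ctx = ulp_ctx;
 *	params.dir = ULP_DIR_INGRESS;
 *	if (bnxt_ulp_rte_parser_hdr_parse(pattern, &params) !=
 *	    BNXT_TF_RC_SUCCESS ||
 *	    bnxt_ulp_rte_parser_act_parse(actions, &params) !=
 *	    BNXT_TF_RC_SUCCESS)
 *		return BNXT_TF_RC_ERROR;
 *
 * On success the hdr_bitmap/act_bitmap, hdr_field and act_prop members
 * describe the flow and are used to select a matching template.
 */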
75 /*
76  * Function to handle the parsing of RTE Flows and placing
77  * the RTE flow items into the ulp structures.
78  */
79 int32_t
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81                               struct ulp_rte_parser_params *params)
82 {
83         const struct rte_flow_item *item = pattern;
84         struct bnxt_ulp_rte_hdr_info *hdr_info;
85
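        /*
         * The first BNXT_ULP_PROTO_HDR_SVIF_NUM hdr_field slots are reserved
         * for the implied SVIF entry, so protocol fields start after them.
         */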
86         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87         if (params->dir == ULP_DIR_EGRESS)
88                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
90
91         /* Parse all the items in the pattern */
92         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
93                 /* get the header information from the ulp_hdr_info table */
94                 hdr_info = &ulp_hdr_info[item->type];
95                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
96                         BNXT_TF_DBG(ERR,
97                                     "Truflow parser does not support type %d\n",
98                                     item->type);
99                         return BNXT_TF_RC_PARSE_ERR;
100                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
101                         /* call the registered callback handler */
102                         if (hdr_info->proto_hdr_func) {
103                                 if (hdr_info->proto_hdr_func(item, params) !=
104                                     BNXT_TF_RC_SUCCESS) {
105                                         return BNXT_TF_RC_ERROR;
106                                 }
107                         }
108                 }
109                 item++;
110         }
111         /* update the implied SVIF */
112         (void)ulp_rte_parser_svif_process(params);
113         return BNXT_TF_RC_SUCCESS;
114 }
115
116 /*
117  * Function to handle the parsing of RTE Flows and placing
118  * the RTE flow actions into the ulp structures.
119  */
120 int32_t
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122                               struct ulp_rte_parser_params *params)
123 {
124         const struct rte_flow_action *action_item = actions;
125         struct bnxt_ulp_rte_act_info *hdr_info;
126
127         if (params->dir == ULP_DIR_EGRESS)
128                 ULP_BITMAP_SET(params->act_bitmap.bits,
129                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
130
131         /* Parse all the actions in the flow action list */
132         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
133                 /* get the action information from the ulp_act_info table */
134                 hdr_info = &ulp_act_info[action_item->type];
135                 if (hdr_info->act_type ==
136                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
137                         BNXT_TF_DBG(ERR,
138                                     "Truflow parser does not support act %u\n",
139                                     action_item->type);
140                         return BNXT_TF_RC_ERROR;
141                 } else if (hdr_info->act_type ==
142                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
143                         /* call the registered callback handler */
144                         if (hdr_info->proto_act_func) {
145                                 if (hdr_info->proto_act_func(action_item,
146                                                              params) !=
147                                     BNXT_TF_RC_SUCCESS) {
148                                         return BNXT_TF_RC_ERROR;
149                                 }
150                         }
151                 }
152                 action_item++;
153         }
154         /* update the implied VNIC */
155         ulp_rte_parser_vnic_process(params);
156         return BNXT_TF_RC_SUCCESS;
157 }
158
159 /* Utility function to compute and set the SVIF in the parser params. */
160 static int32_t
161 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
162                         enum rte_flow_item_type proto,
163                         uint16_t svif,
164                         uint16_t mask)
165 {
166         uint16_t port_id = svif;
167         uint32_t dir = 0;
168         struct ulp_rte_hdr_field *hdr_field;
169         enum bnxt_ulp_svif_type svif_type;
170         enum bnxt_ulp_intf_type if_type;
171         uint32_t ifindex;
172         int32_t rc;
173
174         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
175             BNXT_ULP_INVALID_SVIF_VAL) {
176                 BNXT_TF_DBG(ERR,
177                             "SVIF already set, multiple sources unsupported\n");
178                 return BNXT_TF_RC_ERROR;
179         }
180
181         if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
182                 dir = ULP_COMP_FLD_IDX_RD(params,
183                                           BNXT_ULP_CF_IDX_DIRECTION);
184                 /* perform the conversion from dpdk port to bnxt svif */
185                 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
186                                                        &ifindex);
187                 if (rc) {
188                         BNXT_TF_DBG(ERR,
189                                     "Invalid port id\n");
190                         return BNXT_TF_RC_ERROR;
191                 }
192
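                /*
                 * Pick the SVIF to match on: ingress flows use the physical
                 * port SVIF, egress flows use the function SVIF (the VF
                 * function for VF representors, otherwise the driver
                 * function).
                 */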
193                 if (dir == ULP_DIR_INGRESS) {
194                         svif_type = BNXT_ULP_PHY_PORT_SVIF;
195                 } else {
196                         if_type = bnxt_get_interface_type(port_id);
197                         if (if_type == BNXT_ULP_INTF_TYPE_VF_REP)
198                                 svif_type = BNXT_ULP_VF_FUNC_SVIF;
199                         else
200                                 svif_type = BNXT_ULP_DRV_FUNC_SVIF;
201                 }
202                 ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
203                                      &svif);
204                 svif = rte_cpu_to_be_16(svif);
205         }
206         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
207         memcpy(hdr_field->spec, &svif, sizeof(svif));
208         memcpy(hdr_field->mask, &mask, sizeof(mask));
209         hdr_field->size = sizeof(svif);
210         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
211                             rte_be_to_cpu_16(svif));
212         return BNXT_TF_RC_SUCCESS;
213 }
214
215 /* Function to handle the implicit SVIF when no port item is specified */
216 int32_t
217 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
218 {
219         uint16_t port_id = 0;
220         uint16_t svif_mask = 0xFFFF;
221
222         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
223             BNXT_ULP_INVALID_SVIF_VAL)
224                 return BNXT_TF_RC_SUCCESS;
225
226         /* SVIF not set. So get the port id */
227         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
228
229         /* Update the SVIF details */
230         return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
231                                        port_id, svif_mask);
232 }
233
234 /* Function to handle the implicit VNIC action */
235 int32_t
236 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
237 {
238         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
239
240         if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
241             ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
242                 return BNXT_TF_RC_SUCCESS;
243
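        /*
         * No explicit VNIC or VPORT action was specified.  Reuse the PF
         * action handler to fill in the default VNIC for the port, then
         * clear the VNIC action bit again since it is only implied.
         */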
244         /* Update the vnic details */
245         ulp_rte_pf_act_handler(NULL, params);
246         /* Reset the VNIC bit in the act_bitmap */
247         ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
248
249         return BNXT_TF_RC_SUCCESS;
250 }
251
252 /* Function to handle the parsing of RTE Flow item PF Header. */
253 int32_t
254 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
255                        struct ulp_rte_parser_params *params)
256 {
257         uint16_t port_id = 0;
258         uint16_t svif_mask = 0xFFFF;
259
260         /* Get the port id */
261         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
262
263         /* Update the SVIF details */
264         return ulp_rte_parser_svif_set(params,
265                                        item->type,
266                                        port_id, svif_mask);
267 }
268
269 /* Function to handle the parsing of RTE Flow item VF Header. */
270 int32_t
271 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
272                        struct ulp_rte_parser_params *params)
273 {
274         const struct rte_flow_item_vf *vf_spec = item->spec;
275         const struct rte_flow_item_vf *vf_mask = item->mask;
276         uint16_t svif = 0, mask = 0;
277
278         /* Get VF rte_flow_item for Port details */
279         if (vf_spec)
280                 svif = (uint16_t)vf_spec->id;
281         if (vf_mask)
282                 mask = (uint16_t)vf_mask->id;
283
284         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
285 }
286
287 /* Function to handle the parsing of RTE Flow item port id Header. */
288 int32_t
289 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
290                             struct ulp_rte_parser_params *params)
291 {
292         const struct rte_flow_item_port_id *port_spec = item->spec;
293         const struct rte_flow_item_port_id *port_mask = item->mask;
294         uint16_t svif = 0, mask = 0;
295
296         /*
297          * Copy the rte_flow_item for Port into hdr_field using port id
298          * header fields.
299          */
300         if (port_spec)
301                 svif = (uint16_t)port_spec->id;
302         if (port_mask)
303                 mask = (uint16_t)port_mask->id;
304
305         /* Update the SVIF details */
306         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
307 }
308
309 /* Function to handle the parsing of RTE Flow item phy port Header. */
310 int32_t
311 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
312                              struct ulp_rte_parser_params *params)
313 {
314         const struct rte_flow_item_phy_port *port_spec = item->spec;
315         const struct rte_flow_item_phy_port *port_mask = item->mask;
316         uint32_t svif = 0, mask = 0;
317
318         /* Copy the rte_flow_item for phy port into hdr_field */
319         if (port_spec)
320                 svif = port_spec->index;
321         if (port_mask)
322                 mask = port_mask->index;
323
324         /* Update the SVIF details */
325         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
326 }
327
328 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
329 int32_t
330 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
331                         struct ulp_rte_parser_params *params)
332 {
333         const struct rte_flow_item_eth *eth_spec = item->spec;
334         const struct rte_flow_item_eth *eth_mask = item->mask;
335         struct ulp_rte_hdr_field *field;
336         uint32_t idx = params->field_idx;
337         uint64_t set_flag = 0;
338         uint32_t size;
339
340         /*
341          * Copy the rte_flow_item for eth into hdr_field using ethernet
342          * header fields
343          */
344         if (eth_spec) {
345                 size = sizeof(eth_spec->dst.addr_bytes);
346                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
347                                                 eth_spec->dst.addr_bytes,
348                                                 size);
349                 size = sizeof(eth_spec->src.addr_bytes);
350                 field = ulp_rte_parser_fld_copy(field,
351                                                 eth_spec->src.addr_bytes,
352                                                 size);
353                 field = ulp_rte_parser_fld_copy(field,
354                                                 &eth_spec->type,
355                                                 sizeof(eth_spec->type));
356         }
357         if (eth_mask) {
358                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
359                                        sizeof(eth_mask->dst.addr_bytes));
360                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
361                                        sizeof(eth_mask->src.addr_bytes));
362                 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
363                                        sizeof(eth_mask->type));
364         }
365         /* Add number of ethernet header elements and reserve vlan fields */
366         params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
367         params->vlan_idx = params->field_idx;
368         params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
369
370         /* Update the hdr_bitmap with BNXT_ULP_HDR_BIT_I_ETH */
371         set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
372                                     BNXT_ULP_HDR_BIT_O_ETH);
373         if (set_flag)
374                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
375         else
376                 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
377                                  BNXT_ULP_HDR_BIT_I_ETH);
378
379         /* update the hdr_bitmap with BNXT_ULP_HDR_BIT_O_ETH */
380         ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
381
382         return BNXT_TF_RC_SUCCESS;
383 }
384
385 /* Function to handle the parsing of RTE Flow item Vlan Header. */
386 int32_t
387 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
388                          struct ulp_rte_parser_params *params)
389 {
390         const struct rte_flow_item_vlan *vlan_spec = item->spec;
391         const struct rte_flow_item_vlan *vlan_mask = item->mask;
392         struct ulp_rte_hdr_field *field;
393         struct ulp_rte_hdr_bitmap       *hdr_bit;
394         uint32_t idx = params->vlan_idx;
395         uint16_t vlan_tag, priority;
396         uint32_t outer_vtag_num;
397         uint32_t inner_vtag_num;
398
399         /*
400          * Copy the rte_flow_item for vlan into hdr_field using Vlan
401          * header fields
402          */
403         if (vlan_spec) {
404                 vlan_tag = ntohs(vlan_spec->tci);
405                 priority = htons(vlan_tag >> 13);
406                 vlan_tag &= 0xfff;
407                 vlan_tag = htons(vlan_tag);
408
409                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
410                                                 &priority,
411                                                 sizeof(priority));
412                 field = ulp_rte_parser_fld_copy(field,
413                                                 &vlan_tag,
414                                                 sizeof(vlan_tag));
415                 field = ulp_rte_parser_fld_copy(field,
416                                                 &vlan_spec->inner_type,
417                                                 sizeof(vlan_spec->inner_type));
418         }
419
420         if (vlan_mask) {
421                 vlan_tag = ntohs(vlan_mask->tci);
422                 priority = htons(vlan_tag >> 13);
423                 vlan_tag &= 0xfff;
424                 vlan_tag = htons(vlan_tag);
425
426                 field = &params->hdr_field[idx];
427                 memcpy(field->mask, &priority, field->size);
428                 field++;
429                 memcpy(field->mask, &vlan_tag, field->size);
430                 field++;
431                 memcpy(field->mask, &vlan_mask->inner_type, field->size);
432         }
433         /* Set the vlan index to new incremented value */
434         params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
435
436         /* Get the outer tag and inner tag counts */
437         outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
438                                              BNXT_ULP_CF_IDX_O_VTAG_NUM);
439         inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
440                                              BNXT_ULP_CF_IDX_I_VTAG_NUM);
441
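        /*
         * At most two outer and two inner VLAN tags are accepted:
         * 1st tag after the outer eth -> outer VLAN present
         * 2nd tag after the outer eth -> two outer VLANs
         * 1st tag after the inner eth -> inner VLAN present
         * 2nd tag after the inner eth -> two inner VLANs
         * Anything else is treated as a VLAN without a preceding eth header.
         */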
442         /* Update the hdr_bitmap of the vlans */
443         hdr_bit = &params->hdr_bitmap;
444         if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
445             !outer_vtag_num) {
446                 /* Update the vlan tag num */
447                 outer_vtag_num++;
448                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
449                                     outer_vtag_num);
450                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_PRESENT, 1);
451         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
452                    ULP_COMP_FLD_IDX_RD(params,
453                                        BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
454                    outer_vtag_num == 1) {
455                 /* update the vlan tag num */
456                 outer_vtag_num++;
457                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
458                                     outer_vtag_num);
459                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
460         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
461                    ULP_COMP_FLD_IDX_RD(params,
462                                        BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
463                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
464                    !inner_vtag_num) {
465                 /* update the vlan tag num */
466                 inner_vtag_num++;
467                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
468                                     inner_vtag_num);
469                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_PRESENT, 1);
470         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
471                    ULP_COMP_FLD_IDX_RD(params,
472                                        BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
473                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
474                    ULP_COMP_FLD_IDX_RD(params,
475                                        BNXT_ULP_CF_IDX_I_VTAG_PRESENT) &&
476                    inner_vtag_num == 1) {
477                 /* update the vlan tag num */
478                 inner_vtag_num++;
479                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
480                                     inner_vtag_num);
481                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
482         } else {
483                 BNXT_TF_DBG(ERR, "Error Parsing: Vlan hdr found without eth\n");
484                 return BNXT_TF_RC_ERROR;
485         }
486         return BNXT_TF_RC_SUCCESS;
487 }
488
489 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
490 int32_t
491 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
492                          struct ulp_rte_parser_params *params)
493 {
494         const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
495         const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
496         struct ulp_rte_hdr_field *field;
497         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
498         uint32_t idx = params->field_idx;
499         uint32_t size;
500         uint32_t inner_l3, outer_l3;
501
502         inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
503         if (inner_l3) {
504                 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
505                 return BNXT_TF_RC_ERROR;
506         }
507
508         /*
509          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
510          * header fields
511          */
512         if (ipv4_spec) {
513                 size = sizeof(ipv4_spec->hdr.version_ihl);
514                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
515                                                 &ipv4_spec->hdr.version_ihl,
516                                                 size);
517                 size = sizeof(ipv4_spec->hdr.type_of_service);
518                 field = ulp_rte_parser_fld_copy(field,
519                                                 &ipv4_spec->hdr.type_of_service,
520                                                 size);
521                 size = sizeof(ipv4_spec->hdr.total_length);
522                 field = ulp_rte_parser_fld_copy(field,
523                                                 &ipv4_spec->hdr.total_length,
524                                                 size);
525                 size = sizeof(ipv4_spec->hdr.packet_id);
526                 field = ulp_rte_parser_fld_copy(field,
527                                                 &ipv4_spec->hdr.packet_id,
528                                                 size);
529                 size = sizeof(ipv4_spec->hdr.fragment_offset);
530                 field = ulp_rte_parser_fld_copy(field,
531                                                 &ipv4_spec->hdr.fragment_offset,
532                                                 size);
533                 size = sizeof(ipv4_spec->hdr.time_to_live);
534                 field = ulp_rte_parser_fld_copy(field,
535                                                 &ipv4_spec->hdr.time_to_live,
536                                                 size);
537                 size = sizeof(ipv4_spec->hdr.next_proto_id);
538                 field = ulp_rte_parser_fld_copy(field,
539                                                 &ipv4_spec->hdr.next_proto_id,
540                                                 size);
541                 size = sizeof(ipv4_spec->hdr.hdr_checksum);
542                 field = ulp_rte_parser_fld_copy(field,
543                                                 &ipv4_spec->hdr.hdr_checksum,
544                                                 size);
545                 size = sizeof(ipv4_spec->hdr.src_addr);
546                 field = ulp_rte_parser_fld_copy(field,
547                                                 &ipv4_spec->hdr.src_addr,
548                                                 size);
549                 size = sizeof(ipv4_spec->hdr.dst_addr);
550                 field = ulp_rte_parser_fld_copy(field,
551                                                 &ipv4_spec->hdr.dst_addr,
552                                                 size);
553         }
554         if (ipv4_mask) {
555                 ulp_rte_prsr_mask_copy(params, &idx,
556                                        &ipv4_mask->hdr.version_ihl,
557                                        sizeof(ipv4_mask->hdr.version_ihl));
558                 ulp_rte_prsr_mask_copy(params, &idx,
559                                        &ipv4_mask->hdr.type_of_service,
560                                        sizeof(ipv4_mask->hdr.type_of_service));
561                 ulp_rte_prsr_mask_copy(params, &idx,
562                                        &ipv4_mask->hdr.total_length,
563                                        sizeof(ipv4_mask->hdr.total_length));
564                 ulp_rte_prsr_mask_copy(params, &idx,
565                                        &ipv4_mask->hdr.packet_id,
566                                        sizeof(ipv4_mask->hdr.packet_id));
567                 ulp_rte_prsr_mask_copy(params, &idx,
568                                        &ipv4_mask->hdr.fragment_offset,
569                                        sizeof(ipv4_mask->hdr.fragment_offset));
570                 ulp_rte_prsr_mask_copy(params, &idx,
571                                        &ipv4_mask->hdr.time_to_live,
572                                        sizeof(ipv4_mask->hdr.time_to_live));
573                 ulp_rte_prsr_mask_copy(params, &idx,
574                                        &ipv4_mask->hdr.next_proto_id,
575                                        sizeof(ipv4_mask->hdr.next_proto_id));
576                 ulp_rte_prsr_mask_copy(params, &idx,
577                                        &ipv4_mask->hdr.hdr_checksum,
578                                        sizeof(ipv4_mask->hdr.hdr_checksum));
579                 ulp_rte_prsr_mask_copy(params, &idx,
580                                        &ipv4_mask->hdr.src_addr,
581                                        sizeof(ipv4_mask->hdr.src_addr));
582                 ulp_rte_prsr_mask_copy(params, &idx,
583                                        &ipv4_mask->hdr.dst_addr,
584                                        sizeof(ipv4_mask->hdr.dst_addr));
585         }
586         /* Add the number of ipv4 header elements */
587         params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
588
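        /*
         * If an outer L3 header has already been parsed, this IPv4 header
         * is classified as the inner L3 header; otherwise it becomes the
         * outer one.  The IPv6, UDP and TCP handlers follow the same rule.
         */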
589         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
590         outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
591         if (outer_l3 ||
592             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
593             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
594                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
595                 inner_l3++;
596                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
597         } else {
598                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
599                 outer_l3++;
600                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
601         }
602         return BNXT_TF_RC_SUCCESS;
603 }
604
605 /* Function to handle the parsing of RTE Flow item IPV6 Header */
606 int32_t
607 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
608                          struct ulp_rte_parser_params *params)
609 {
610         const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
611         const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
612         struct ulp_rte_hdr_field *field;
613         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
614         uint32_t idx = params->field_idx;
615         uint32_t size;
616         uint32_t inner_l3, outer_l3;
617         uint32_t vtcf, vtcf_mask;
618
619         inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
620         if (inner_l3) {
621                 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
622                 return BNXT_TF_RC_ERROR;
623         }
624
625         /*
626          * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
627          * header fields
628          */
629         if (ipv6_spec) {
630                 size = sizeof(ipv6_spec->hdr.vtc_flow);
631
632                 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
633                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
634                                                 &vtcf,
635                                                 size);
636
637                 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
638                 field = ulp_rte_parser_fld_copy(field,
639                                                 &vtcf,
640                                                 size);
641
642                 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
643                 field = ulp_rte_parser_fld_copy(field,
644                                                 &vtcf,
645                                                 size);
646
647                 size = sizeof(ipv6_spec->hdr.payload_len);
648                 field = ulp_rte_parser_fld_copy(field,
649                                                 &ipv6_spec->hdr.payload_len,
650                                                 size);
651                 size = sizeof(ipv6_spec->hdr.proto);
652                 field = ulp_rte_parser_fld_copy(field,
653                                                 &ipv6_spec->hdr.proto,
654                                                 size);
655                 size = sizeof(ipv6_spec->hdr.hop_limits);
656                 field = ulp_rte_parser_fld_copy(field,
657                                                 &ipv6_spec->hdr.hop_limits,
658                                                 size);
659                 size = sizeof(ipv6_spec->hdr.src_addr);
660                 field = ulp_rte_parser_fld_copy(field,
661                                                 &ipv6_spec->hdr.src_addr,
662                                                 size);
663                 size = sizeof(ipv6_spec->hdr.dst_addr);
664                 field = ulp_rte_parser_fld_copy(field,
665                                                 &ipv6_spec->hdr.dst_addr,
666                                                 size);
667         }
668         if (ipv6_mask) {
669                 size = sizeof(ipv6_mask->hdr.vtc_flow);
670
671                 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
672                 ulp_rte_prsr_mask_copy(params, &idx,
673                                        &vtcf_mask,
674                                        size);
675
676                 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
677                 ulp_rte_prsr_mask_copy(params, &idx,
678                                        &vtcf_mask,
679                                        size);
680
681                 vtcf_mask =
682                         BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
683                 ulp_rte_prsr_mask_copy(params, &idx,
684                                        &vtcf_mask,
685                                        size);
686
687                 ulp_rte_prsr_mask_copy(params, &idx,
688                                        &ipv6_mask->hdr.payload_len,
689                                        sizeof(ipv6_mask->hdr.payload_len));
690                 ulp_rte_prsr_mask_copy(params, &idx,
691                                        &ipv6_mask->hdr.proto,
692                                        sizeof(ipv6_mask->hdr.proto));
693                 ulp_rte_prsr_mask_copy(params, &idx,
694                                        &ipv6_mask->hdr.hop_limits,
695                                        sizeof(ipv6_mask->hdr.hop_limits));
696                 ulp_rte_prsr_mask_copy(params, &idx,
697                                        &ipv6_mask->hdr.src_addr,
698                                        sizeof(ipv6_mask->hdr.src_addr));
699                 ulp_rte_prsr_mask_copy(params, &idx,
700                                        &ipv6_mask->hdr.dst_addr,
701                                        sizeof(ipv6_mask->hdr.dst_addr));
702         }
703         /* add number of ipv6 header elements */
704         params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
705
706         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
707         outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
708         if (outer_l3 ||
709             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
710             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
711                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
712                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
713         } else {
714                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
715                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
716         }
717         return BNXT_TF_RC_SUCCESS;
718 }
719
720 /* Function to handle the parsing of RTE Flow item UDP Header. */
721 int32_t
722 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
723                         struct ulp_rte_parser_params *params)
724 {
725         const struct rte_flow_item_udp *udp_spec = item->spec;
726         const struct rte_flow_item_udp *udp_mask = item->mask;
727         struct ulp_rte_hdr_field *field;
728         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
729         uint32_t idx = params->field_idx;
730         uint32_t size;
731         uint32_t inner_l4, outer_l4;
732
733         inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
734         if (inner_l4) {
735                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
736                 return BNXT_TF_RC_ERROR;
737         }
738
739         /*
740          * Copy the rte_flow_item for udp into hdr_field using udp
741          * header fields
742          */
743         if (udp_spec) {
744                 size = sizeof(udp_spec->hdr.src_port);
745                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
746                                                 &udp_spec->hdr.src_port,
747                                                 size);
748                 size = sizeof(udp_spec->hdr.dst_port);
749                 field = ulp_rte_parser_fld_copy(field,
750                                                 &udp_spec->hdr.dst_port,
751                                                 size);
752                 size = sizeof(udp_spec->hdr.dgram_len);
753                 field = ulp_rte_parser_fld_copy(field,
754                                                 &udp_spec->hdr.dgram_len,
755                                                 size);
756                 size = sizeof(udp_spec->hdr.dgram_cksum);
757                 field = ulp_rte_parser_fld_copy(field,
758                                                 &udp_spec->hdr.dgram_cksum,
759                                                 size);
760         }
761         if (udp_mask) {
762                 ulp_rte_prsr_mask_copy(params, &idx,
763                                        &udp_mask->hdr.src_port,
764                                        sizeof(udp_mask->hdr.src_port));
765                 ulp_rte_prsr_mask_copy(params, &idx,
766                                        &udp_mask->hdr.dst_port,
767                                        sizeof(udp_mask->hdr.dst_port));
768                 ulp_rte_prsr_mask_copy(params, &idx,
769                                        &udp_mask->hdr.dgram_len,
770                                        sizeof(udp_mask->hdr.dgram_len));
771                 ulp_rte_prsr_mask_copy(params, &idx,
772                                        &udp_mask->hdr.dgram_cksum,
773                                        sizeof(udp_mask->hdr.dgram_cksum));
774         }
775
776         /* Add number of UDP header elements */
777         params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
778
779         /* Set the udp header bitmap and computed l4 header bitmaps */
780         outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
781         if (outer_l4 ||
782             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
783             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
784                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
785                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
786         } else {
787                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
788                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
789         }
790         return BNXT_TF_RC_SUCCESS;
791 }
792
793 /* Function to handle the parsing of RTE Flow item TCP Header. */
794 int32_t
795 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
796                         struct ulp_rte_parser_params *params)
797 {
798         const struct rte_flow_item_tcp *tcp_spec = item->spec;
799         const struct rte_flow_item_tcp *tcp_mask = item->mask;
800         struct ulp_rte_hdr_field *field;
801         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
802         uint32_t idx = params->field_idx;
803         uint32_t size;
804         uint32_t inner_l4, outer_l4;
805
806         inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
807         if (inner_l4) {
808                 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
809                 return BNXT_TF_RC_ERROR;
810         }
811
812         /*
813          * Copy the rte_flow_item for tcp into hdr_field using tcp
814          * header fields
815          */
816         if (tcp_spec) {
817                 size = sizeof(tcp_spec->hdr.src_port);
818                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
819                                                 &tcp_spec->hdr.src_port,
820                                                 size);
821                 size = sizeof(tcp_spec->hdr.dst_port);
822                 field = ulp_rte_parser_fld_copy(field,
823                                                 &tcp_spec->hdr.dst_port,
824                                                 size);
825                 size = sizeof(tcp_spec->hdr.sent_seq);
826                 field = ulp_rte_parser_fld_copy(field,
827                                                 &tcp_spec->hdr.sent_seq,
828                                                 size);
829                 size = sizeof(tcp_spec->hdr.recv_ack);
830                 field = ulp_rte_parser_fld_copy(field,
831                                                 &tcp_spec->hdr.recv_ack,
832                                                 size);
833                 size = sizeof(tcp_spec->hdr.data_off);
834                 field = ulp_rte_parser_fld_copy(field,
835                                                 &tcp_spec->hdr.data_off,
836                                                 size);
837                 size = sizeof(tcp_spec->hdr.tcp_flags);
838                 field = ulp_rte_parser_fld_copy(field,
839                                                 &tcp_spec->hdr.tcp_flags,
840                                                 size);
841                 size = sizeof(tcp_spec->hdr.rx_win);
842                 field = ulp_rte_parser_fld_copy(field,
843                                                 &tcp_spec->hdr.rx_win,
844                                                 size);
845                 size = sizeof(tcp_spec->hdr.cksum);
846                 field = ulp_rte_parser_fld_copy(field,
847                                                 &tcp_spec->hdr.cksum,
848                                                 size);
849                 size = sizeof(tcp_spec->hdr.tcp_urp);
850                 field = ulp_rte_parser_fld_copy(field,
851                                                 &tcp_spec->hdr.tcp_urp,
852                                                 size);
853         } else {
854                 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
855         }
856
857         if (tcp_mask) {
858                 ulp_rte_prsr_mask_copy(params, &idx,
859                                        &tcp_mask->hdr.src_port,
860                                        sizeof(tcp_mask->hdr.src_port));
861                 ulp_rte_prsr_mask_copy(params, &idx,
862                                        &tcp_mask->hdr.dst_port,
863                                        sizeof(tcp_mask->hdr.dst_port));
864                 ulp_rte_prsr_mask_copy(params, &idx,
865                                        &tcp_mask->hdr.sent_seq,
866                                        sizeof(tcp_mask->hdr.sent_seq));
867                 ulp_rte_prsr_mask_copy(params, &idx,
868                                        &tcp_mask->hdr.recv_ack,
869                                        sizeof(tcp_mask->hdr.recv_ack));
870                 ulp_rte_prsr_mask_copy(params, &idx,
871                                        &tcp_mask->hdr.data_off,
872                                        sizeof(tcp_mask->hdr.data_off));
873                 ulp_rte_prsr_mask_copy(params, &idx,
874                                        &tcp_mask->hdr.tcp_flags,
875                                        sizeof(tcp_mask->hdr.tcp_flags));
876                 ulp_rte_prsr_mask_copy(params, &idx,
877                                        &tcp_mask->hdr.rx_win,
878                                        sizeof(tcp_mask->hdr.rx_win));
879                 ulp_rte_prsr_mask_copy(params, &idx,
880                                        &tcp_mask->hdr.cksum,
881                                        sizeof(tcp_mask->hdr.cksum));
882                 ulp_rte_prsr_mask_copy(params, &idx,
883                                        &tcp_mask->hdr.tcp_urp,
884                                        sizeof(tcp_mask->hdr.tcp_urp));
885         }
886         /* add number of TCP header elements */
887         params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
888
889         /* Set the tcp header bitmap and computed l4 header bitmaps */
890         outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
891         if (outer_l4 ||
892             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
893             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
894                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
895                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
896         } else {
897                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
898                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
899         }
900         return BNXT_TF_RC_SUCCESS;
901 }
902
903 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
904 int32_t
905 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
906                           struct ulp_rte_parser_params *params)
907 {
908         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
909         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
910         struct ulp_rte_hdr_field *field;
911         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
912         uint32_t idx = params->field_idx;
913         uint32_t size;
914
915         /*
916          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
917          * header fields
918          */
919         if (vxlan_spec) {
920                 size = sizeof(vxlan_spec->flags);
921                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
922                                                 &vxlan_spec->flags,
923                                                 size);
924                 size = sizeof(vxlan_spec->rsvd0);
925                 field = ulp_rte_parser_fld_copy(field,
926                                                 &vxlan_spec->rsvd0,
927                                                 size);
928                 size = sizeof(vxlan_spec->vni);
929                 field = ulp_rte_parser_fld_copy(field,
930                                                 &vxlan_spec->vni,
931                                                 size);
932                 size = sizeof(vxlan_spec->rsvd1);
933                 field = ulp_rte_parser_fld_copy(field,
934                                                 &vxlan_spec->rsvd1,
935                                                 size);
936         }
937         if (vxlan_mask) {
938                 ulp_rte_prsr_mask_copy(params, &idx,
939                                        &vxlan_mask->flags,
940                                        sizeof(vxlan_mask->flags));
941                 ulp_rte_prsr_mask_copy(params, &idx,
942                                        &vxlan_mask->rsvd0,
943                                        sizeof(vxlan_mask->rsvd0));
944                 ulp_rte_prsr_mask_copy(params, &idx,
945                                        &vxlan_mask->vni,
946                                        sizeof(vxlan_mask->vni));
947                 ulp_rte_prsr_mask_copy(params, &idx,
948                                        &vxlan_mask->rsvd1,
949                                        sizeof(vxlan_mask->rsvd1));
950         }
951         /* Add number of vxlan header elements */
952         params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
953
954         /* Update the hdr_bitmap with vxlan */
955         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
956         return BNXT_TF_RC_SUCCESS;
957 }
958
959 /* Function to handle the parsing of RTE Flow item void Header */
960 int32_t
961 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
962                          struct ulp_rte_parser_params *params __rte_unused)
963 {
964         return BNXT_TF_RC_SUCCESS;
965 }
966
967 /* Function to handle the parsing of RTE Flow action void Header. */
968 int32_t
969 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
970                          struct ulp_rte_parser_params *params __rte_unused)
971 {
972         return BNXT_TF_RC_SUCCESS;
973 }
974
975 /* Function to handle the parsing of RTE Flow action Mark Header. */
976 int32_t
977 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
978                          struct ulp_rte_parser_params *param)
979 {
980         const struct rte_flow_action_mark *mark;
981         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
982         uint32_t mark_id;
983
984         mark = action_item->conf;
985         if (mark) {
986                 mark_id = tfp_cpu_to_be_32(mark->id);
987                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
988                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
989
990                 /* Update the act_bitmap with mark */
991                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
992                 return BNXT_TF_RC_SUCCESS;
993         }
994         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
995         return BNXT_TF_RC_ERROR;
996 }
997
998 /* Function to handle the parsing of RTE Flow action RSS Header. */
999 int32_t
1000 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1001                         struct ulp_rte_parser_params *param)
1002 {
1003         const struct rte_flow_action_rss *rss = action_item->conf;
1004
1005         if (rss) {
1006                 /* Update the act_bitmap with rss */
1007                 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1008                 return BNXT_TF_RC_SUCCESS;
1009         }
1010         BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1011         return BNXT_TF_RC_ERROR;
1012 }
1013
1014 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1015 int32_t
1016 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1017                                 struct ulp_rte_parser_params *params)
1018 {
1019         const struct rte_flow_action_vxlan_encap *vxlan_encap;
1020         const struct rte_flow_item *item;
1021         const struct rte_flow_item_eth *eth_spec;
1022         const struct rte_flow_item_ipv4 *ipv4_spec;
1023         const struct rte_flow_item_ipv6 *ipv6_spec;
1024         struct rte_flow_item_vxlan vxlan_spec;
1025         uint32_t vlan_num = 0, vlan_size = 0;
1026         uint32_t ip_size = 0, ip_type = 0;
1027         uint32_t vxlan_size = 0;
1028         uint8_t *buff;
1029         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1030         const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1031                                     0x00, 0x40, 0x11};
1032         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1033         struct ulp_rte_act_prop *ap = &params->act_prop;
1034
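        /*
         * The encap definition is expected to describe the complete tunnel
         * header chain: ETH, optionally one or two VLAN tags, IPv4 or IPv6,
         * UDP and finally VXLAN.  Each header is copied into the encap
         * fields of the action properties below.
         */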
1035         vxlan_encap = action_item->conf;
1036         if (!vxlan_encap) {
1037                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1038                 return BNXT_TF_RC_ERROR;
1039         }
1040
1041         item = vxlan_encap->definition;
1042         if (!item) {
1043                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1044                 return BNXT_TF_RC_ERROR;
1045         }
1046
1047         if (!ulp_rte_item_skip_void(&item, 0))
1048                 return BNXT_TF_RC_ERROR;
1049
1050         /* must have ethernet header */
1051         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1052                 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1053                 return BNXT_TF_RC_ERROR;
1054         }
1055         eth_spec = item->spec;
1056         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1057         ulp_encap_buffer_copy(buff,
1058                               eth_spec->dst.addr_bytes,
1059                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1060
1061         /* Goto the next item */
1062         if (!ulp_rte_item_skip_void(&item, 1))
1063                 return BNXT_TF_RC_ERROR;
1064
1065         /* May have vlan header */
1066         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1067                 vlan_num++;
1068                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1069                 ulp_encap_buffer_copy(buff,
1070                                       item->spec,
1071                                       sizeof(struct rte_flow_item_vlan));
1072
1073                 if (!ulp_rte_item_skip_void(&item, 1))
1074                         return BNXT_TF_RC_ERROR;
1075         }
1076
1077         /* may have two vlan headers */
1078         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1079                 vlan_num++;
1080                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1081                        sizeof(struct rte_flow_item_vlan)],
1082                        item->spec,
1083                        sizeof(struct rte_flow_item_vlan));
1084                 if (!ulp_rte_item_skip_void(&item, 1))
1085                         return BNXT_TF_RC_ERROR;
1086         }
1087         /* Update the vlan count and size if vlan headers are present */
1088         if (vlan_num) {
1089                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1090                 vlan_num = tfp_cpu_to_be_32(vlan_num);
1091                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1092                        &vlan_num,
1093                        sizeof(uint32_t));
1094                 vlan_size = tfp_cpu_to_be_32(vlan_size);
1095                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1096                        &vlan_size,
1097                        sizeof(uint32_t));
1098         }
1099
1100         /* L3 must be IPv4 or IPv6 */
1101         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1102                 ipv4_spec = item->spec;
1103                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1104
1105                 /* copy the ipv4 details */
1106                 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1107                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1108                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1109                         ulp_encap_buffer_copy(buff,
1110                                               def_ipv4_hdr,
1111                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1112                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1113                 } else {
1114                         const uint8_t *tmp_buff;
1115
1116                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1117                         ulp_encap_buffer_copy(buff,
1118                                               &ipv4_spec->hdr.version_ihl,
1119                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1120                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1121                              BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1122                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1123                         ulp_encap_buffer_copy(buff,
1124                                               tmp_buff,
1125                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1126                 }
1127                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1128                     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1129                     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1130                 ulp_encap_buffer_copy(buff,
1131                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1132                                       BNXT_ULP_ENCAP_IPV4_DEST_IP);
1133
1134                 /* Update the ip size details */
1135                 ip_size = tfp_cpu_to_be_32(ip_size);
1136                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1137                        &ip_size, sizeof(uint32_t));
1138
1139                 /* update the ip type */
1140                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1141                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1142                        &ip_type, sizeof(uint32_t));
1143
1144                 /* update the computed field to notify it is ipv4 header */
1145                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1146                                     1);
1147
1148                 if (!ulp_rte_item_skip_void(&item, 1))
1149                         return BNXT_TF_RC_ERROR;
1150         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1151                 ipv6_spec = item->spec;
1152                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1153
1154                 /* copy the ipv6 details */
1155                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1156                        ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1157
1158                 /* Update the ip size details */
1159                 ip_size = tfp_cpu_to_be_32(ip_size);
1160                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1161                        &ip_size, sizeof(uint32_t));
1162
1163                  /* update the ip type */
1164                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1165                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1166                        &ip_type, sizeof(uint32_t));
1167
1168                 /* set the computed field to indicate an IPv6 encap header */
1169                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1170                                     1);
1171
1172                 if (!ulp_rte_item_skip_void(&item, 1))
1173                         return BNXT_TF_RC_ERROR;
1174         } else {
1175                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1176                 return BNXT_TF_RC_ERROR;
1177         }
1178
1179         /* L4 is UDP */
1180         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1181                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1182                 return BNXT_TF_RC_ERROR;
1183         }
1184         /* copy the udp details */
1185         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1186                               item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1187
1188         if (!ulp_rte_item_skip_void(&item, 1))
1189                 return BNXT_TF_RC_ERROR;
1190
1191         /* Finally VXLAN */
1192         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1193                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1194                 return BNXT_TF_RC_ERROR;
1195         }
1196         vxlan_size = sizeof(struct rte_flow_item_vxlan);
1197         /* copy the vxlan details */
1198         memcpy(&vxlan_spec, item->spec, vxlan_size);
1199         vxlan_spec.flags = 0x08;        /* set only the VNI-valid (I) flag */
1200         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1201                               (const uint8_t *)&vxlan_spec,
1202                               vxlan_size);
1203         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1204         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1205                &vxlan_size, sizeof(uint32_t));
1206
1207         /* update the act_bitmap with vxlan encap */
1208         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1209         return BNXT_TF_RC_SUCCESS;
1210 }
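
/*
 * Illustrative sketch (not part of the driver, all values are placeholders):
 * the handler above expects rte_flow_action_vxlan_encap::definition to carry
 * an outer ETH item (consumed earlier in this handler) followed by the
 * IPV4/IPV6, UDP and VXLAN items parsed here, terminated by END. Only
 * item->spec is read; 4789 is the IANA-assigned VXLAN UDP port, and field
 * names follow this DPDK revision's rte_flow definitions.
 *
 *	struct rte_flow_item_eth eth = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_ipv4 ip = {
 *		.hdr.dst_addr = RTE_BE32(0x0a000001),
 *	};
 *	struct rte_flow_item_udp udp = {
 *		.hdr.dst_port = RTE_BE16(4789),
 *	};
 *	struct rte_flow_item_vxlan vxlan = { .vni = { 0, 0, 42 } };
 *	struct rte_flow_item defn[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vxlan_encap encap_conf = { .definition = defn };
 *	struct rte_flow_action encap_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 *		.conf = &encap_conf,
 *	};
 */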
1211
1212 /* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
1213 int32_t
1214 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1215                                 __rte_unused,
1216                                 struct ulp_rte_parser_params *params)
1217 {
1218         /* update the act_bitmap with vxlan decap */
1219         ULP_BITMAP_SET(params->act_bitmap.bits,
1220                        BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1221         return BNXT_TF_RC_SUCCESS;
1222 }
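
/*
 * Illustrative sketch: VXLAN decap carries no configuration, so an
 * application would typically request it as a bare action entry, e.g.
 *
 *	struct rte_flow_action decap_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP,
 *	};
 */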
1223
1224 /* Function to handle the parsing of RTE Flow action drop Header. */
1225 int32_t
1226 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1227                          struct ulp_rte_parser_params *params)
1228 {
1229         /* Update the act_bitmap with drop */
1230         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1231         return BNXT_TF_RC_SUCCESS;
1232 }
1233
1234 /* Function to handle the parsing of RTE Flow action count. */
1235 int32_t
1236 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1237                           struct ulp_rte_parser_params *params)
1239 {
1240         const struct rte_flow_action_count *act_count;
1241         struct ulp_rte_act_prop *act_prop = &params->act_prop;
1242
1243         act_count = action_item->conf;
1244         if (act_count) {
1245                 if (act_count->shared) {
1246                         BNXT_TF_DBG(ERR,
1247                                     "Parse Error:Shared count not supported\n");
1248                         return BNXT_TF_RC_PARSE_ERR;
1249                 }
1250                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1251                        &act_count->id,
1252                        BNXT_ULP_ACT_PROP_SZ_COUNT);
1253         }
1254
1255         /* Update the act_bitmap with count */
1256         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1257         return BNXT_TF_RC_SUCCESS;
1258 }
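
/*
 * Illustrative sketch (placeholder counter id): a per-flow counter request
 * accepted by the handler above; shared counters are rejected there.
 *
 *	struct rte_flow_action_count cnt = { .shared = 0, .id = 7 };
 *	struct rte_flow_action count_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *		.conf = &cnt,
 *	};
 */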
1259
1260 /* Function to handle the parsing of RTE Flow action PF. */
1261 int32_t
1262 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1263                        struct ulp_rte_parser_params *params)
1264 {
1265         uint32_t svif;
1266
1267         /* Update the act_bitmap with the vnic bit */
1268         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1269
1270         /* copy the VNIC id of the current device into the VNIC property */
1271         svif = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1272         svif = bnxt_get_vnic_id(svif, BNXT_ULP_INTF_TYPE_INVALID);
1273         svif = rte_cpu_to_be_32(svif);
1274         memcpy(&params->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1275                &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1276
1277         return BNXT_TF_RC_SUCCESS;
1278 }
1279
1280 /* Function to handle the parsing of RTE Flow action VF. */
1281 int32_t
1282 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1283                        struct ulp_rte_parser_params *param)
1284 {
1285         const struct rte_flow_action_vf *vf_action;
1286         uint32_t pid;
1287
1288         vf_action = action_item->conf;
1289         if (vf_action) {
1290                 if (vf_action->original) {
1291                         BNXT_TF_DBG(ERR,
1292                                     "Parse Error:VF Original not supported\n");
1293                         return BNXT_TF_RC_PARSE_ERR;
1294                 }
1295                 /* TBD: Update the computed VNIC using VF conversion */
1296                 pid = bnxt_get_vnic_id(vf_action->id,
1297                                        BNXT_ULP_INTF_TYPE_INVALID);
1298                 pid = rte_cpu_to_be_32(pid);
1299                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1300                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1301         }
1302
1303         /* Update the act_bitmap with the vnic bit */
1304         ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1305         return BNXT_TF_RC_SUCCESS;
1306 }
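
/*
 * Illustrative sketch (placeholder VF id): redirection to a VF as parsed by
 * the handler above; 'original' must stay zero since it is rejected.
 *
 *	struct rte_flow_action_vf vf_conf = { .original = 0, .id = 2 };
 *	struct rte_flow_action vf_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_VF,
 *		.conf = &vf_conf,
 *	};
 */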
1307
1308 /* Function to handle the parsing of RTE Flow action port_id. */
1309 int32_t
1310 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1311                             struct ulp_rte_parser_params *param)
1312 {
1313         const struct rte_flow_action_port_id *port_id;
1314         uint32_t pid;
1315
1316         port_id = act_item->conf;
1317         if (port_id) {
1318                 if (port_id->original) {
1319                         BNXT_TF_DBG(ERR,
1320                                     "ParseErr:Portid Original not supported\n");
1321                         return BNXT_TF_RC_PARSE_ERR;
1322                 }
1323                 /* TBD: Update the computed VNIC using port conversion */
1324                 pid = bnxt_get_vnic_id(port_id->id, BNXT_ULP_INTF_TYPE_INVALID);
1325                 pid = rte_cpu_to_be_32(pid);
1326                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1327                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1328         }
1329
1330         /* Update the act_bitmap with the vnic bit */
1331         ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1332         return BNXT_TF_RC_SUCCESS;
1333 }
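
/*
 * Illustrative sketch (placeholder port id): forwarding to another DPDK
 * ethdev port, e.g. a representor; the id is translated to a VNIC above.
 *
 *	struct rte_flow_action_port_id pid_conf = { .original = 0, .id = 1 };
 *	struct rte_flow_action port_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *		.conf = &pid_conf,
 *	};
 */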
1334
1335 /* Function to handle the parsing of RTE Flow action phy_port. */
1336 int32_t
1337 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1338                              struct ulp_rte_parser_params *prm)
1339 {
1340         const struct rte_flow_action_phy_port *phy_port;
1341         uint32_t vport;
1342
1343         phy_port = action_item->conf;
1344         if (phy_port) {
1345                 if (phy_port->original) {
1346                         BNXT_TF_DBG(ERR,
1347                                     "Parse Err:Port Original not supported\n");
1348                         return BNXT_TF_RC_PARSE_ERR;
1349                 }
1350                 /* Get the vport of the physical port */
1351                 /* TBD: shall be changed later to portdb call */
1352                 vport = 1 << phy_port->index;
1353                 vport = rte_cpu_to_be_32(vport);
1354                 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1355                        &vport, BNXT_ULP_ACT_PROP_SZ_VPORT);
1356         }
1357
1358         /* Update the act_bitmap with the vport bit */
1359         ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1360         return BNXT_TF_RC_SUCCESS;
1361 }
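
/*
 * Illustrative sketch (placeholder index): steering to a physical port; the
 * handler above derives the vport as a one-hot value (1 << index) until the
 * port database lookup replaces this conversion.
 *
 *	struct rte_flow_action_phy_port phy_conf = { .original = 0, .index = 0 };
 *	struct rte_flow_action phy_act = {
 *		.type = RTE_FLOW_ACTION_TYPE_PHY_PORT,
 *		.conf = &phy_conf,
 *	};
 */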