net/bnxt: support action bitmap opcode
[dpdk.git] drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
12 #include "tfp.h"
13 #include "ulp_port_db.h"
14
15 /* Utility function to skip the void items. */
16 static inline int32_t
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
18 {
19         if (!*item)
20                 return 0;
21         if (increment)
22                 (*item)++;
23         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
24                 (*item)++;
25         if (*item)
26                 return 1;
27         return 0;
28 }
29
30 /* Utility function to update the field_bitmap */
31 static void
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
33                                    uint32_t idx)
34 {
35         struct ulp_rte_hdr_field *field;
36
37         field = &params->hdr_field[idx];
38         if (ulp_bitmap_notzero(field->mask, field->size)) {
39                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
40                 /* Not exact match */
41                 if (!ulp_bitmap_is_ones(field->mask, field->size))
42                         ULP_BITMAP_SET(params->fld_bitmap.bits,
43                                        BNXT_ULP_MATCH_TYPE_BITMASK_WM);
44         } else {
45                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
46         }
47 }
48
49 /* Utility function to copy field spec items */
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
52                         const void *buffer,
53                         uint32_t size)
54 {
55         field->size = size;
56         memcpy(field->spec, buffer, field->size);
57         field++;
58         return field;
59 }
60
61 /* Utility function to copy field mask items */
62 static void
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
64                        uint32_t *idx,
65                        const void *buffer,
66                        uint32_t size)
67 {
68         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
69
70         memcpy(field->mask, buffer, size);
71         ulp_rte_parser_field_bitmap_update(params, *idx);
72         *idx = *idx + 1;
73 }
74
75 /*
76  * Function to handle the parsing of RTE Flows and placing
77  * the RTE flow items into the ulp structures.
78  */
79 int32_t
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81                               struct ulp_rte_parser_params *params)
82 {
83         const struct rte_flow_item *item = pattern;
84         struct bnxt_ulp_rte_hdr_info *hdr_info;
85
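        /* Reserve the leading hdr_field slots for the implicit SVIF entry */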
86         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87         if (params->dir == ULP_DIR_EGRESS)
88                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
89                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
90
91         /* Parse all the items in the pattern */
92         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
93                 /* get the header information from the flow_hdr_info table */
94                 hdr_info = &ulp_hdr_info[item->type];
95                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
96                         BNXT_TF_DBG(ERR,
97                                     "Truflow parser does not support type %d\n",
98                                     item->type);
99                         return BNXT_TF_RC_PARSE_ERR;
100                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
101                         /* call the registered callback handler */
102                         if (hdr_info->proto_hdr_func) {
103                                 if (hdr_info->proto_hdr_func(item, params) !=
104                                     BNXT_TF_RC_SUCCESS) {
105                                         return BNXT_TF_RC_ERROR;
106                                 }
107                         }
108                 }
109                 item++;
110         }
111         /* update the implied SVIF */
112         (void)ulp_rte_parser_svif_process(params);
113         return BNXT_TF_RC_SUCCESS;
114 }
115
116 /*
117  * Function to handle the parsing of RTE Flows and placing
118  * the RTE flow actions into the ulp structures.
119  */
120 int32_t
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122                               struct ulp_rte_parser_params *params)
123 {
124         const struct rte_flow_action *action_item = actions;
125         struct bnxt_ulp_rte_act_info *hdr_info;
126
127         if (params->dir == ULP_DIR_EGRESS)
128                 ULP_BITMAP_SET(params->act_bitmap.bits,
129                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
130
131         /* Parse all the actions in the action list */
132         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
133                 /* get the action information from the ulp_act_info table */
134                 hdr_info = &ulp_act_info[action_item->type];
135                 if (hdr_info->act_type ==
136                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
137                         BNXT_TF_DBG(ERR,
138                                     "Truflow parser does not support act %u\n",
139                                     action_item->type);
140                         return BNXT_TF_RC_ERROR;
141                 } else if (hdr_info->act_type ==
142                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
143                         /* call the registered callback handler */
144                         if (hdr_info->proto_act_func) {
145                                 if (hdr_info->proto_act_func(action_item,
146                                                              params) !=
147                                     BNXT_TF_RC_SUCCESS) {
148                                         return BNXT_TF_RC_ERROR;
149                                 }
150                         }
151                 }
152                 action_item++;
153         }
154         /* update the implied VNIC */
155         ulp_rte_parser_vnic_process(params);
156         return BNXT_TF_RC_SUCCESS;
157 }
158
159 /* Utility function to compute and set the SVIF for the flow source item. */
160 static int32_t
161 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
162                         enum rte_flow_item_type proto,
163                         uint16_t svif,
164                         uint16_t mask)
165 {
166         uint16_t port_id = svif;
167         uint32_t dir = 0;
168         struct ulp_rte_hdr_field *hdr_field;
169         uint32_t ifindex;
170         int32_t rc;
171
172         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
173             BNXT_ULP_INVALID_SVIF_VAL) {
174                 BNXT_TF_DBG(ERR,
175                             "SVIF already set, multiple sources not supported\n");
176                 return BNXT_TF_RC_ERROR;
177         }
178
179         if (proto == RTE_FLOW_ITEM_TYPE_PORT_ID) {
180                 dir = ULP_COMP_FLD_IDX_RD(params,
181                                           BNXT_ULP_CF_IDX_DIRECTION);
182                 /* perform the conversion from dpdk port to bnxt svif */
183                 rc = ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
184                                                        &ifindex);
185                 if (rc) {
186                         BNXT_TF_DBG(ERR,
187                                     "Invalid port id\n");
188                         return BNXT_TF_RC_ERROR;
189                 }
190                 ulp_port_db_svif_get(params->ulp_ctx, ifindex, dir, &svif);
191                 svif = rte_cpu_to_be_16(svif);
192         }
193         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
194         memcpy(hdr_field->spec, &svif, sizeof(svif));
195         memcpy(hdr_field->mask, &mask, sizeof(mask));
196         hdr_field->size = sizeof(svif);
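        /* Record the SVIF so a later source item is rejected as a duplicate */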
197         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
198                             rte_be_to_cpu_16(svif));
199         return BNXT_TF_RC_SUCCESS;
200 }
201
202 /* Function to handle the implicit SVIF when no source port item is given. */
203 int32_t
204 ulp_rte_parser_svif_process(struct ulp_rte_parser_params *params)
205 {
206         uint16_t port_id = 0;
207         uint16_t svif_mask = 0xFFFF;
208
209         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
210             BNXT_ULP_INVALID_SVIF_VAL)
211                 return BNXT_TF_RC_SUCCESS;
212
213         /* SVIF not set. So get the port id */
214         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
215
216         /* Update the SVIF details */
217         return ulp_rte_parser_svif_set(params, RTE_FLOW_ITEM_TYPE_PORT_ID,
218                                        port_id, svif_mask);
219 }
220
221 /* Function to handle the implicit VNIC RTE port id */
222 int32_t
223 ulp_rte_parser_vnic_process(struct ulp_rte_parser_params *params)
224 {
225         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
226
227         if (ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VNIC) ||
228             ULP_BITMAP_ISSET(act->bits, BNXT_ULP_ACTION_BIT_VPORT))
229                 return BNXT_TF_RC_SUCCESS;
230
231         /* Update the vnic details */
232         ulp_rte_pf_act_handler(NULL, params);
233         return BNXT_TF_RC_SUCCESS;
234 }
235
236 /* Function to handle the parsing of RTE Flow item PF Header. */
237 int32_t
238 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
239                        struct ulp_rte_parser_params *params)
240 {
241         uint16_t port_id = 0;
242         uint16_t svif_mask = 0xFFFF;
243
244         /* Get the port id */
245         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
246
247         /* Update the SVIF details */
248         return ulp_rte_parser_svif_set(params,
249                                        item->type,
250                                        port_id, svif_mask);
251 }
252
253 /* Function to handle the parsing of RTE Flow item VF Header. */
254 int32_t
255 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
256                        struct ulp_rte_parser_params *params)
257 {
258         const struct rte_flow_item_vf *vf_spec = item->spec;
259         const struct rte_flow_item_vf *vf_mask = item->mask;
260         uint16_t svif = 0, mask = 0;
261
262         /* Get VF rte_flow_item for Port details */
263         if (vf_spec)
264                 svif = (uint16_t)vf_spec->id;
265         if (vf_mask)
266                 mask = (uint16_t)vf_mask->id;
267
268         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
269 }
270
271 /* Function to handle the parsing of RTE Flow item port id Header. */
272 int32_t
273 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
274                             struct ulp_rte_parser_params *params)
275 {
276         const struct rte_flow_item_port_id *port_spec = item->spec;
277         const struct rte_flow_item_port_id *port_mask = item->mask;
278         uint16_t svif = 0, mask = 0;
279
280         /*
281          * Copy the rte_flow_item for Port into hdr_field using port id
282          * header fields.
283          */
284         if (port_spec)
285                 svif = (uint16_t)port_spec->id;
286         if (port_mask)
287                 mask = (uint16_t)port_mask->id;
288
289         /* Update the SVIF details */
290         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
291 }
292
293 /* Function to handle the parsing of RTE Flow item phy port Header. */
294 int32_t
295 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
296                              struct ulp_rte_parser_params *params)
297 {
298         const struct rte_flow_item_phy_port *port_spec = item->spec;
299         const struct rte_flow_item_phy_port *port_mask = item->mask;
300         uint32_t svif = 0, mask = 0;
301
302         /* Copy the rte_flow_item for phy port into hdr_field */
303         if (port_spec)
304                 svif = port_spec->index;
305         if (port_mask)
306                 mask = port_mask->index;
307
308         /* Update the SVIF details */
309         return ulp_rte_parser_svif_set(params, item->type, svif, mask);
310 }
311
312 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
313 int32_t
314 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
315                         struct ulp_rte_parser_params *params)
316 {
317         const struct rte_flow_item_eth *eth_spec = item->spec;
318         const struct rte_flow_item_eth *eth_mask = item->mask;
319         struct ulp_rte_hdr_field *field;
320         uint32_t idx = params->field_idx;
321         uint64_t set_flag = 0;
322         uint32_t size;
323
324         /*
325          * Copy the rte_flow_item for eth into hdr_field using ethernet
326          * header fields
327          */
328         if (eth_spec) {
329                 size = sizeof(eth_spec->dst.addr_bytes);
330                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
331                                                 eth_spec->dst.addr_bytes,
332                                                 size);
333                 size = sizeof(eth_spec->src.addr_bytes);
334                 field = ulp_rte_parser_fld_copy(field,
335                                                 eth_spec->src.addr_bytes,
336                                                 size);
337                 field = ulp_rte_parser_fld_copy(field,
338                                                 &eth_spec->type,
339                                                 sizeof(eth_spec->type));
340         }
341         if (eth_mask) {
342                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
343                                        sizeof(eth_mask->dst.addr_bytes));
344                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
345                                        sizeof(eth_mask->src.addr_bytes));
346                 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
347                                        sizeof(eth_mask->type));
348         }
349         /* Add number of vlan header elements */
350         params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
351         params->vlan_idx = params->field_idx;
352         params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
353
354         /* Update the hdr_bitmap with BNXT_ULP_HDR_BIT_I_ETH */
355         set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
356                                     BNXT_ULP_HDR_BIT_O_ETH);
357         if (set_flag)
358                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
359         else
360                 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
361                                  BNXT_ULP_HDR_BIT_I_ETH);
362
363         /* update the hdr_bitmap with BNXT_ULP_HDR_BIT_O_ETH */
364         ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
365
366         return BNXT_TF_RC_SUCCESS;
367 }
368
369 /* Function to handle the parsing of RTE Flow item Vlan Header. */
370 int32_t
371 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
372                          struct ulp_rte_parser_params *params)
373 {
374         const struct rte_flow_item_vlan *vlan_spec = item->spec;
375         const struct rte_flow_item_vlan *vlan_mask = item->mask;
376         struct ulp_rte_hdr_field *field;
377         struct ulp_rte_hdr_bitmap       *hdr_bit;
378         uint32_t idx = params->vlan_idx;
379         uint16_t vlan_tag, priority;
380         uint32_t outer_vtag_num;
381         uint32_t inner_vtag_num;
382
383         /*
384          * Copy the rte_flow_item for vlan into hdr_field using Vlan
385          * header fields
386          */
387         if (vlan_spec) {
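                /* Split the TCI into the 3-bit priority and 12-bit VLAN id */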
388                 vlan_tag = ntohs(vlan_spec->tci);
389                 priority = htons(vlan_tag >> 13);
390                 vlan_tag &= 0xfff;
391                 vlan_tag = htons(vlan_tag);
392
393                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
394                                                 &priority,
395                                                 sizeof(priority));
396                 field = ulp_rte_parser_fld_copy(field,
397                                                 &vlan_tag,
398                                                 sizeof(vlan_tag));
399                 field = ulp_rte_parser_fld_copy(field,
400                                                 &vlan_spec->inner_type,
401                                                 sizeof(vlan_spec->inner_type));
402         }
403
404         if (vlan_mask) {
405                 vlan_tag = ntohs(vlan_mask->tci);
406                 priority = htons(vlan_tag >> 13);
407                 vlan_tag &= 0xfff;
408                 vlan_tag = htons(vlan_tag);
409
410                 field = &params->hdr_field[idx];
411                 memcpy(field->mask, &priority, field->size);
412                 field++;
413                 memcpy(field->mask, &vlan_tag, field->size);
414                 field++;
415                 memcpy(field->mask, &vlan_mask->inner_type, field->size);
416         }
417         /* Advance the vlan index past the parsed VLAN header fields */
418         params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
419
420         /* Get the outer tag and inner tag counts */
421         outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
422                                              BNXT_ULP_CF_IDX_O_VTAG_NUM);
423         inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
424                                              BNXT_ULP_CF_IDX_I_VTAG_NUM);
425
426         /* Update the hdr_bitmap of the vlans */
427         hdr_bit = &params->hdr_bitmap;
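        /* Classify this VLAN as the first/second outer tag or first/second inner tag */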
428         if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
429             !outer_vtag_num) {
430                 /* Update the vlan tag num */
431                 outer_vtag_num++;
432                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
433                                     outer_vtag_num);
434                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_PRESENT, 1);
435         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
436                    ULP_COMP_FLD_IDX_RD(params,
437                                        BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
438                    outer_vtag_num == 1) {
439                 /* update the vlan tag num */
440                 outer_vtag_num++;
441                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
442                                     outer_vtag_num);
443                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
444         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
445                    ULP_COMP_FLD_IDX_RD(params,
446                                        BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
447                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
448                    !inner_vtag_num) {
449                 /* update the vlan tag num */
450                 inner_vtag_num++;
451                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
452                                     inner_vtag_num);
453                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_PRESENT, 1);
454         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
455                    ULP_COMP_FLD_IDX_RD(params,
456                                        BNXT_ULP_CF_IDX_O_VTAG_PRESENT) &&
457                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
458                    ULP_COMP_FLD_IDX_RD(params,
459                                        BNXT_ULP_CF_IDX_I_VTAG_PRESENT) &&
460                    inner_vtag_num == 1) {
461                 /* update the vlan tag num */
462                 inner_vtag_num++;
463                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
464                                     inner_vtag_num);
465                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
466         } else {
467                 BNXT_TF_DBG(ERR, "Error Parsing: VLAN hdr found without eth\n");
468                 return BNXT_TF_RC_ERROR;
469         }
470         return BNXT_TF_RC_SUCCESS;
471 }
472
473 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
474 int32_t
475 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
476                          struct ulp_rte_parser_params *params)
477 {
478         const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
479         const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
480         struct ulp_rte_hdr_field *field;
481         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
482         uint32_t idx = params->field_idx;
483         uint32_t size;
484         uint32_t inner_l3, outer_l3;
485
486         inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
487         if (inner_l3) {
488                 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
489                 return BNXT_TF_RC_ERROR;
490         }
491
492         /*
493          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
494          * header fields
495          */
496         if (ipv4_spec) {
497                 size = sizeof(ipv4_spec->hdr.version_ihl);
498                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
499                                                 &ipv4_spec->hdr.version_ihl,
500                                                 size);
501                 size = sizeof(ipv4_spec->hdr.type_of_service);
502                 field = ulp_rte_parser_fld_copy(field,
503                                                 &ipv4_spec->hdr.type_of_service,
504                                                 size);
505                 size = sizeof(ipv4_spec->hdr.total_length);
506                 field = ulp_rte_parser_fld_copy(field,
507                                                 &ipv4_spec->hdr.total_length,
508                                                 size);
509                 size = sizeof(ipv4_spec->hdr.packet_id);
510                 field = ulp_rte_parser_fld_copy(field,
511                                                 &ipv4_spec->hdr.packet_id,
512                                                 size);
513                 size = sizeof(ipv4_spec->hdr.fragment_offset);
514                 field = ulp_rte_parser_fld_copy(field,
515                                                 &ipv4_spec->hdr.fragment_offset,
516                                                 size);
517                 size = sizeof(ipv4_spec->hdr.time_to_live);
518                 field = ulp_rte_parser_fld_copy(field,
519                                                 &ipv4_spec->hdr.time_to_live,
520                                                 size);
521                 size = sizeof(ipv4_spec->hdr.next_proto_id);
522                 field = ulp_rte_parser_fld_copy(field,
523                                                 &ipv4_spec->hdr.next_proto_id,
524                                                 size);
525                 size = sizeof(ipv4_spec->hdr.hdr_checksum);
526                 field = ulp_rte_parser_fld_copy(field,
527                                                 &ipv4_spec->hdr.hdr_checksum,
528                                                 size);
529                 size = sizeof(ipv4_spec->hdr.src_addr);
530                 field = ulp_rte_parser_fld_copy(field,
531                                                 &ipv4_spec->hdr.src_addr,
532                                                 size);
533                 size = sizeof(ipv4_spec->hdr.dst_addr);
534                 field = ulp_rte_parser_fld_copy(field,
535                                                 &ipv4_spec->hdr.dst_addr,
536                                                 size);
537         }
538         if (ipv4_mask) {
539                 ulp_rte_prsr_mask_copy(params, &idx,
540                                        &ipv4_mask->hdr.version_ihl,
541                                        sizeof(ipv4_mask->hdr.version_ihl));
542                 ulp_rte_prsr_mask_copy(params, &idx,
543                                        &ipv4_mask->hdr.type_of_service,
544                                        sizeof(ipv4_mask->hdr.type_of_service));
545                 ulp_rte_prsr_mask_copy(params, &idx,
546                                        &ipv4_mask->hdr.total_length,
547                                        sizeof(ipv4_mask->hdr.total_length));
548                 ulp_rte_prsr_mask_copy(params, &idx,
549                                        &ipv4_mask->hdr.packet_id,
550                                        sizeof(ipv4_mask->hdr.packet_id));
551                 ulp_rte_prsr_mask_copy(params, &idx,
552                                        &ipv4_mask->hdr.fragment_offset,
553                                        sizeof(ipv4_mask->hdr.fragment_offset));
554                 ulp_rte_prsr_mask_copy(params, &idx,
555                                        &ipv4_mask->hdr.time_to_live,
556                                        sizeof(ipv4_mask->hdr.time_to_live));
557                 ulp_rte_prsr_mask_copy(params, &idx,
558                                        &ipv4_mask->hdr.next_proto_id,
559                                        sizeof(ipv4_mask->hdr.next_proto_id));
560                 ulp_rte_prsr_mask_copy(params, &idx,
561                                        &ipv4_mask->hdr.hdr_checksum,
562                                        sizeof(ipv4_mask->hdr.hdr_checksum));
563                 ulp_rte_prsr_mask_copy(params, &idx,
564                                        &ipv4_mask->hdr.src_addr,
565                                        sizeof(ipv4_mask->hdr.src_addr));
566                 ulp_rte_prsr_mask_copy(params, &idx,
567                                        &ipv4_mask->hdr.dst_addr,
568                                        sizeof(ipv4_mask->hdr.dst_addr));
569         }
570         /* Add the number of ipv4 header elements */
571         params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
572
573         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
574         outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
575         if (outer_l3 ||
576             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
577             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
578                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
579                 inner_l3++;
580                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
581         } else {
582                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
583                 outer_l3++;
584                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
585         }
586         return BNXT_TF_RC_SUCCESS;
587 }
588
589 /* Function to handle the parsing of RTE Flow item IPV6 Header */
590 int32_t
591 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
592                          struct ulp_rte_parser_params *params)
593 {
594         const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
595         const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
596         struct ulp_rte_hdr_field *field;
597         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
598         uint32_t idx = params->field_idx;
599         uint32_t size;
600         uint32_t inner_l3, outer_l3;
601
602         inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
603         if (inner_l3) {
604                 BNXT_TF_DBG(ERR, "Parse Error: Third L3 header not supported\n");
605                 return BNXT_TF_RC_ERROR;
606         }
607
608         /*
609          * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
610          * header fields
611          */
612         if (ipv6_spec) {
613                 size = sizeof(ipv6_spec->hdr.vtc_flow);
614                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
615                                                 &ipv6_spec->hdr.vtc_flow,
616                                                 size);
617                 size = sizeof(ipv6_spec->hdr.payload_len);
618                 field = ulp_rte_parser_fld_copy(field,
619                                                 &ipv6_spec->hdr.payload_len,
620                                                 size);
621                 size = sizeof(ipv6_spec->hdr.proto);
622                 field = ulp_rte_parser_fld_copy(field,
623                                                 &ipv6_spec->hdr.proto,
624                                                 size);
625                 size = sizeof(ipv6_spec->hdr.hop_limits);
626                 field = ulp_rte_parser_fld_copy(field,
627                                                 &ipv6_spec->hdr.hop_limits,
628                                                 size);
629                 size = sizeof(ipv6_spec->hdr.src_addr);
630                 field = ulp_rte_parser_fld_copy(field,
631                                                 &ipv6_spec->hdr.src_addr,
632                                                 size);
633                 size = sizeof(ipv6_spec->hdr.dst_addr);
634                 field = ulp_rte_parser_fld_copy(field,
635                                                 &ipv6_spec->hdr.dst_addr,
636                                                 size);
637         }
638         if (ipv6_mask) {
639                 ulp_rte_prsr_mask_copy(params, &idx,
640                                        &ipv6_mask->hdr.vtc_flow,
641                                        sizeof(ipv6_mask->hdr.vtc_flow));
642                 ulp_rte_prsr_mask_copy(params, &idx,
643                                        &ipv6_mask->hdr.payload_len,
644                                        sizeof(ipv6_mask->hdr.payload_len));
645                 ulp_rte_prsr_mask_copy(params, &idx,
646                                        &ipv6_mask->hdr.proto,
647                                        sizeof(ipv6_mask->hdr.proto));
648                 ulp_rte_prsr_mask_copy(params, &idx,
649                                        &ipv6_mask->hdr.hop_limits,
650                                        sizeof(ipv6_mask->hdr.hop_limits));
651                 ulp_rte_prsr_mask_copy(params, &idx,
652                                        &ipv6_mask->hdr.src_addr,
653                                        sizeof(ipv6_mask->hdr.src_addr));
654                 ulp_rte_prsr_mask_copy(params, &idx,
655                                        &ipv6_mask->hdr.dst_addr,
656                                        sizeof(ipv6_mask->hdr.dst_addr));
657         }
658         /* add number of ipv6 header elements */
659         params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
660
661         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
662         outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
663         if (outer_l3 ||
664             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
665             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
666                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
667                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
668         } else {
669                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
670                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
671         }
672         return BNXT_TF_RC_SUCCESS;
673 }
674
675 /* Function to handle the parsing of RTE Flow item UDP Header. */
676 int32_t
677 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
678                         struct ulp_rte_parser_params *params)
679 {
680         const struct rte_flow_item_udp *udp_spec = item->spec;
681         const struct rte_flow_item_udp *udp_mask = item->mask;
682         struct ulp_rte_hdr_field *field;
683         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
684         uint32_t idx = params->field_idx;
685         uint32_t size;
686         uint32_t inner_l4, outer_l4;
687
688         inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
689         if (inner_l4) {
690                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
691                 return BNXT_TF_RC_ERROR;
692         }
693
694         /*
695          * Copy the rte_flow_item for udp into hdr_field using udp
696          * header fields
697          */
698         if (udp_spec) {
699                 size = sizeof(udp_spec->hdr.src_port);
700                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
701                                                 &udp_spec->hdr.src_port,
702                                                 size);
703                 size = sizeof(udp_spec->hdr.dst_port);
704                 field = ulp_rte_parser_fld_copy(field,
705                                                 &udp_spec->hdr.dst_port,
706                                                 size);
707                 size = sizeof(udp_spec->hdr.dgram_len);
708                 field = ulp_rte_parser_fld_copy(field,
709                                                 &udp_spec->hdr.dgram_len,
710                                                 size);
711                 size = sizeof(udp_spec->hdr.dgram_cksum);
712                 field = ulp_rte_parser_fld_copy(field,
713                                                 &udp_spec->hdr.dgram_cksum,
714                                                 size);
715         }
716         if (udp_mask) {
717                 ulp_rte_prsr_mask_copy(params, &idx,
718                                        &udp_mask->hdr.src_port,
719                                        sizeof(udp_mask->hdr.src_port));
720                 ulp_rte_prsr_mask_copy(params, &idx,
721                                        &udp_mask->hdr.dst_port,
722                                        sizeof(udp_mask->hdr.dst_port));
723                 ulp_rte_prsr_mask_copy(params, &idx,
724                                        &udp_mask->hdr.dgram_len,
725                                        sizeof(udp_mask->hdr.dgram_len));
726                 ulp_rte_prsr_mask_copy(params, &idx,
727                                        &udp_mask->hdr.dgram_cksum,
728                                        sizeof(udp_mask->hdr.dgram_cksum));
729         }
730
731         /* Add number of UDP header elements */
732         params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
733
734         /* Set the udp header bitmap and computed l4 header bitmaps */
735         outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
736         if (outer_l4 ||
737             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
738             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
739                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
740                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
741         } else {
742                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
743                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
744         }
745         return BNXT_TF_RC_SUCCESS;
746 }
747
748 /* Function to handle the parsing of RTE Flow item TCP Header. */
749 int32_t
750 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
751                         struct ulp_rte_parser_params *params)
752 {
753         const struct rte_flow_item_tcp *tcp_spec = item->spec;
754         const struct rte_flow_item_tcp *tcp_mask = item->mask;
755         struct ulp_rte_hdr_field *field;
756         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
757         uint32_t idx = params->field_idx;
758         uint32_t size;
759         uint32_t inner_l4, outer_l4;
760
761         inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
762         if (inner_l4) {
763                 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
764                 return BNXT_TF_RC_ERROR;
765         }
766
767         /*
768          * Copy the rte_flow_item for tcp into hdr_field using tcp
769          * header fields
770          */
771         if (tcp_spec) {
772                 size = sizeof(tcp_spec->hdr.src_port);
773                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
774                                                 &tcp_spec->hdr.src_port,
775                                                 size);
776                 size = sizeof(tcp_spec->hdr.dst_port);
777                 field = ulp_rte_parser_fld_copy(field,
778                                                 &tcp_spec->hdr.dst_port,
779                                                 size);
780                 size = sizeof(tcp_spec->hdr.sent_seq);
781                 field = ulp_rte_parser_fld_copy(field,
782                                                 &tcp_spec->hdr.sent_seq,
783                                                 size);
784                 size = sizeof(tcp_spec->hdr.recv_ack);
785                 field = ulp_rte_parser_fld_copy(field,
786                                                 &tcp_spec->hdr.recv_ack,
787                                                 size);
788                 size = sizeof(tcp_spec->hdr.data_off);
789                 field = ulp_rte_parser_fld_copy(field,
790                                                 &tcp_spec->hdr.data_off,
791                                                 size);
792                 size = sizeof(tcp_spec->hdr.tcp_flags);
793                 field = ulp_rte_parser_fld_copy(field,
794                                                 &tcp_spec->hdr.tcp_flags,
795                                                 size);
796                 size = sizeof(tcp_spec->hdr.rx_win);
797                 field = ulp_rte_parser_fld_copy(field,
798                                                 &tcp_spec->hdr.rx_win,
799                                                 size);
800                 size = sizeof(tcp_spec->hdr.cksum);
801                 field = ulp_rte_parser_fld_copy(field,
802                                                 &tcp_spec->hdr.cksum,
803                                                 size);
804                 size = sizeof(tcp_spec->hdr.tcp_urp);
805                 field = ulp_rte_parser_fld_copy(field,
806                                                 &tcp_spec->hdr.tcp_urp,
807                                                 size);
808         } else {
809                 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
810         }
811
812         if (tcp_mask) {
813                 ulp_rte_prsr_mask_copy(params, &idx,
814                                        &tcp_mask->hdr.src_port,
815                                        sizeof(tcp_mask->hdr.src_port));
816                 ulp_rte_prsr_mask_copy(params, &idx,
817                                        &tcp_mask->hdr.dst_port,
818                                        sizeof(tcp_mask->hdr.dst_port));
819                 ulp_rte_prsr_mask_copy(params, &idx,
820                                        &tcp_mask->hdr.sent_seq,
821                                        sizeof(tcp_mask->hdr.sent_seq));
822                 ulp_rte_prsr_mask_copy(params, &idx,
823                                        &tcp_mask->hdr.recv_ack,
824                                        sizeof(tcp_mask->hdr.recv_ack));
825                 ulp_rte_prsr_mask_copy(params, &idx,
826                                        &tcp_mask->hdr.data_off,
827                                        sizeof(tcp_mask->hdr.data_off));
828                 ulp_rte_prsr_mask_copy(params, &idx,
829                                        &tcp_mask->hdr.tcp_flags,
830                                        sizeof(tcp_mask->hdr.tcp_flags));
831                 ulp_rte_prsr_mask_copy(params, &idx,
832                                        &tcp_mask->hdr.rx_win,
833                                        sizeof(tcp_mask->hdr.rx_win));
834                 ulp_rte_prsr_mask_copy(params, &idx,
835                                        &tcp_mask->hdr.cksum,
836                                        sizeof(tcp_mask->hdr.cksum));
837                 ulp_rte_prsr_mask_copy(params, &idx,
838                                        &tcp_mask->hdr.tcp_urp,
839                                        sizeof(tcp_mask->hdr.tcp_urp));
840         }
841         /* add number of TCP header elements */
842         params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
843
844         /* Set the tcp header bitmap and computed l4 header bitmaps */
845         outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
846         if (outer_l4 ||
847             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
848             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
849                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
850                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
851         } else {
852                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
853                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
854         }
855         return BNXT_TF_RC_SUCCESS;
856 }
857
858 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
859 int32_t
860 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
861                           struct ulp_rte_parser_params *params)
862 {
863         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
864         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
865         struct ulp_rte_hdr_field *field;
866         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
867         uint32_t idx = params->field_idx;
868         uint32_t size;
869
870         /*
871          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
872          * header fields
873          */
874         if (vxlan_spec) {
875                 size = sizeof(vxlan_spec->flags);
876                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
877                                                 &vxlan_spec->flags,
878                                                 size);
879                 size = sizeof(vxlan_spec->rsvd0);
880                 field = ulp_rte_parser_fld_copy(field,
881                                                 &vxlan_spec->rsvd0,
882                                                 size);
883                 size = sizeof(vxlan_spec->vni);
884                 field = ulp_rte_parser_fld_copy(field,
885                                                 &vxlan_spec->vni,
886                                                 size);
887                 size = sizeof(vxlan_spec->rsvd1);
888                 field = ulp_rte_parser_fld_copy(field,
889                                                 &vxlan_spec->rsvd1,
890                                                 size);
891         }
892         if (vxlan_mask) {
893                 ulp_rte_prsr_mask_copy(params, &idx,
894                                        &vxlan_mask->flags,
895                                        sizeof(vxlan_mask->flags));
896                 ulp_rte_prsr_mask_copy(params, &idx,
897                                        &vxlan_mask->rsvd0,
898                                        sizeof(vxlan_mask->rsvd0));
899                 ulp_rte_prsr_mask_copy(params, &idx,
900                                        &vxlan_mask->vni,
901                                        sizeof(vxlan_mask->vni));
902                 ulp_rte_prsr_mask_copy(params, &idx,
903                                        &vxlan_mask->rsvd1,
904                                        sizeof(vxlan_mask->rsvd1));
905         }
906         /* Add number of vxlan header elements */
907         params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
908
909         /* Update the hdr_bitmap with vxlan */
910         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
911         return BNXT_TF_RC_SUCCESS;
912 }
913
914 /* Function to handle the parsing of RTE Flow item void Header */
915 int32_t
916 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
917                          struct ulp_rte_parser_params *params __rte_unused)
918 {
919         return BNXT_TF_RC_SUCCESS;
920 }
921
922 /* Function to handle the parsing of RTE Flow action void Header. */
923 int32_t
924 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
925                          struct ulp_rte_parser_params *params __rte_unused)
926 {
927         return BNXT_TF_RC_SUCCESS;
928 }
929
930 /* Function to handle the parsing of RTE Flow action Mark Header. */
931 int32_t
932 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
933                          struct ulp_rte_parser_params *param)
934 {
935         const struct rte_flow_action_mark *mark;
936         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
937         uint32_t mark_id;
938
939         mark = action_item->conf;
940         if (mark) {
941                 mark_id = tfp_cpu_to_be_32(mark->id);
942                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
943                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
944
945                 /* Update the act_bitmap with mark */
946                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
947                 return BNXT_TF_RC_SUCCESS;
948         }
949         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
950         return BNXT_TF_RC_ERROR;
951 }
952
953 /* Function to handle the parsing of RTE Flow action RSS Header. */
954 int32_t
955 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
956                         struct ulp_rte_parser_params *param)
957 {
958         const struct rte_flow_action_rss *rss = action_item->conf;
959
960         if (rss) {
961                 /* Update the act_bitmap with RSS */
962                 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
963                 return BNXT_TF_RC_SUCCESS;
964         }
965         BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
966         return BNXT_TF_RC_ERROR;
967 }
968
969 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
970 int32_t
971 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
972                                 struct ulp_rte_parser_params *params)
973 {
974         const struct rte_flow_action_vxlan_encap *vxlan_encap;
975         const struct rte_flow_item *item;
976         const struct rte_flow_item_eth *eth_spec;
977         const struct rte_flow_item_ipv4 *ipv4_spec;
978         const struct rte_flow_item_ipv6 *ipv6_spec;
979         struct rte_flow_item_vxlan vxlan_spec;
980         uint32_t vlan_num = 0, vlan_size = 0;
981         uint32_t ip_size = 0, ip_type = 0;
982         uint32_t vxlan_size = 0;
983         uint8_t *buff;
984         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
985         const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
986                                     0x00, 0x40, 0x11};
987         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
988         struct ulp_rte_act_prop *ap = &params->act_prop;
989
990         vxlan_encap = action_item->conf;
991         if (!vxlan_encap) {
992                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
993                 return BNXT_TF_RC_ERROR;
994         }
995
996         item = vxlan_encap->definition;
997         if (!item) {
998                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
999                 return BNXT_TF_RC_ERROR;
1000         }
1001
1002         if (!ulp_rte_item_skip_void(&item, 0))
1003                 return BNXT_TF_RC_ERROR;
1004
1005         /* must have ethernet header */
1006         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1007                 BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
1008                 return BNXT_TF_RC_ERROR;
1009         }
1010         eth_spec = item->spec;
1011         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1012         ulp_encap_buffer_copy(buff,
1013                               eth_spec->dst.addr_bytes,
1014                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1015
1016         /* Goto the next item */
1017         if (!ulp_rte_item_skip_void(&item, 1))
1018                 return BNXT_TF_RC_ERROR;
1019
1020         /* May have vlan header */
1021         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1022                 vlan_num++;
1023                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1024                 ulp_encap_buffer_copy(buff,
1025                                       item->spec,
1026                                       sizeof(struct rte_flow_item_vlan));
1027
1028                 if (!ulp_rte_item_skip_void(&item, 1))
1029                         return BNXT_TF_RC_ERROR;
1030         }
1031
1032         /* may have two vlan headers */
1033         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1034                 vlan_num++;
1035                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1036                        sizeof(struct rte_flow_item_vlan)],
1037                        item->spec,
1038                        sizeof(struct rte_flow_item_vlan));
1039                 if (!ulp_rte_item_skip_void(&item, 1))
1040                         return BNXT_TF_RC_ERROR;
1041         }
1042         /* Update the vlan count and total size when VLAN tags are present */
1043         if (vlan_num) {
1044                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1045                 vlan_num = tfp_cpu_to_be_32(vlan_num);
1046                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1047                        &vlan_num,
1048                        sizeof(uint32_t));
1049                 vlan_size = tfp_cpu_to_be_32(vlan_size);
1050                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1051                        &vlan_size,
1052                        sizeof(uint32_t));
1053         }
1054
1055         /* L3 must be IPv4 or IPv6 */
1056         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1057                 ipv4_spec = item->spec;
1058                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1059
1060                 /* copy the ipv4 details */
1061                 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1062                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
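                        /* Spec has no version/TOS bytes; use the default IPv4 header */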
1063                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1064                         ulp_encap_buffer_copy(buff,
1065                                               def_ipv4_hdr,
1066                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1067                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1068                 } else {
1069                         const uint8_t *tmp_buff;
1070
1071                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1072                         ulp_encap_buffer_copy(buff,
1073                                               &ipv4_spec->hdr.version_ihl,
1074                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1075                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1076                              BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
1077                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1078                         ulp_encap_buffer_copy(buff,
1079                                               tmp_buff,
1080                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1081                 }
1082                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1083                     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1084                     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1085                 ulp_encap_buffer_copy(buff,
1086                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1087                                       BNXT_ULP_ENCAP_IPV4_DEST_IP);
1088
1089                 /* Update the ip size details */
1090                 ip_size = tfp_cpu_to_be_32(ip_size);
1091                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1092                        &ip_size, sizeof(uint32_t));
1093
1094                 /* update the ip type */
1095                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1096                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1097                        &ip_type, sizeof(uint32_t));
1098
1099                 if (!ulp_rte_item_skip_void(&item, 1))
1100                         return BNXT_TF_RC_ERROR;
1101         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1102                 ipv6_spec = item->spec;
1103                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1104
1105                 /* copy the ipv6 details */
1106                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1107                        ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1108
1109                 /* Update the ip size details */
1110                 ip_size = tfp_cpu_to_be_32(ip_size);
1111                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1112                        &ip_size, sizeof(uint32_t));
1113
1114                 /* update the ip type */
1115                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1116                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1117                        &ip_type, sizeof(uint32_t));
1118
1119                 if (!ulp_rte_item_skip_void(&item, 1))
1120                         return BNXT_TF_RC_ERROR;
1121         } else {
1122                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1123                 return BNXT_TF_RC_ERROR;
1124         }
1125
1126         /* L4 is UDP */
1127         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1128                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1129                 return BNXT_TF_RC_ERROR;
1130         }
1131         /* copy the udp details */
1132         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1133                               item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1134
1135         if (!ulp_rte_item_skip_void(&item, 1))
1136                 return BNXT_TF_RC_ERROR;
1137
1138         /* Finally VXLAN */
1139         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1140                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1141                 return BNXT_TF_RC_ERROR;
1142         }
1143         vxlan_size = sizeof(struct rte_flow_item_vxlan);
1144         /* copy the vxlan details */
1145         memcpy(&vxlan_spec, item->spec, vxlan_size);
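        /* Force the VXLAN flags byte to 0x08 (VNI-valid I flag) */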
1146         vxlan_spec.flags = 0x08;
1147         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1148                               (const uint8_t *)&vxlan_spec,
1149                               vxlan_size);
1150         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1151         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1152                &vxlan_size, sizeof(uint32_t));
1153
1154         /* update the act_bitmap with vxlan encap */
1155         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1156         return BNXT_TF_RC_SUCCESS;
1157 }
1158
1159 /* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
1160 int32_t
1161 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1162                                 __rte_unused,
1163                                 struct ulp_rte_parser_params *params)
1164 {
1165         /* update the act_bitmap with vxlan decap */
1166         ULP_BITMAP_SET(params->act_bitmap.bits,
1167                        BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1168         return BNXT_TF_RC_SUCCESS;
1169 }
1170
1171 /* Function to handle the parsing of RTE Flow action drop Header. */
1172 int32_t
1173 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1174                          struct ulp_rte_parser_params *params)
1175 {
1176         /* Update the act_bitmap with drop */
1177         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1178         return BNXT_TF_RC_SUCCESS;
1179 }
1180
1181 /* Function to handle the parsing of RTE Flow action count. */
1182 int32_t
1183 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1184                           struct ulp_rte_parser_params *params)
1185
1186 {
1187         const struct rte_flow_action_count *act_count;
1188         struct ulp_rte_act_prop *act_prop = &params->act_prop;
1189
1190         act_count = action_item->conf;
1191         if (act_count) {
1192                 if (act_count->shared) {
1193                         BNXT_TF_DBG(ERR,
1194                                     "Parse Error:Shared count not supported\n");
1195                         return BNXT_TF_RC_PARSE_ERR;
1196                 }
1197                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1198                        &act_count->id,
1199                        BNXT_ULP_ACT_PROP_SZ_COUNT);
1200         }
1201
1202         /* Update the act_bitmap with count */
1203         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1204         return BNXT_TF_RC_SUCCESS;
1205 }
1206
1207 /* Function to handle the parsing of RTE Flow action PF. */
1208 int32_t
1209 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1210                        struct ulp_rte_parser_params *params)
1211 {
1212         uint32_t svif;
1213
1214         /* Update the act_bitmap with the vnic bit */
1215         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1216
1217         /* copy the VNIC of the current device into the VNIC property */
1218         svif = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1219         svif = bnxt_get_vnic_id(svif);
1220         svif = rte_cpu_to_be_32(svif);
1221         memcpy(&params->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1222                &svif, BNXT_ULP_ACT_PROP_SZ_VNIC);
1223
1224         return BNXT_TF_RC_SUCCESS;
1225 }
1226
1227 /* Function to handle the parsing of RTE Flow action VF. */
1228 int32_t
1229 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1230                        struct ulp_rte_parser_params *param)
1231 {
1232         const struct rte_flow_action_vf *vf_action;
1233         uint32_t pid;
1234
1235         vf_action = action_item->conf;
1236         if (vf_action) {
1237                 if (vf_action->original) {
1238                         BNXT_TF_DBG(ERR,
1239                                     "Parse Error:VF Original not supported\n");
1240                         return BNXT_TF_RC_PARSE_ERR;
1241                 }
1242                 /* TBD: Update the computed VNIC using VF conversion */
1243                 pid = bnxt_get_vnic_id(vf_action->id);
1244                 pid = rte_cpu_to_be_32(pid);
1245                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1246                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1247         }
1248
1249         /* Update the act_bitmap with vnic */
1250         ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1251         return BNXT_TF_RC_SUCCESS;
1252 }
1253
1254 /* Function to handle the parsing of RTE Flow action port_id. */
1255 int32_t
1256 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1257                             struct ulp_rte_parser_params *param)
1258 {
1259         const struct rte_flow_action_port_id *port_id;
1260         uint32_t pid;
1261
1262         port_id = act_item->conf;
1263         if (port_id) {
1264                 if (port_id->original) {
1265                         BNXT_TF_DBG(ERR,
1266                                     "ParseErr:Portid Original not supported\n");
1267                         return BNXT_TF_RC_PARSE_ERR;
1268                 }
1269                 /* TBD: Update the computed VNIC using port conversion */
1270                 pid = bnxt_get_vnic_id(port_id->id);
1271                 pid = rte_cpu_to_be_32(pid);
1272                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1273                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1274         }
1275
1276         /* Update the act_bitmap with vnic */
1277         ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VNIC);
1278         return BNXT_TF_RC_SUCCESS;
1279 }
1280
1281 /* Function to handle the parsing of RTE Flow action phy_port. */
1282 int32_t
1283 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1284                              struct ulp_rte_parser_params *prm)
1285 {
1286         const struct rte_flow_action_phy_port *phy_port;
1287         uint32_t pid;
1288
1289         phy_port = action_item->conf;
1290         if (phy_port) {
1291                 if (phy_port->original) {
1292                         BNXT_TF_DBG(ERR,
1293                                     "Parse Err:Port Original not supported\n");
1294                         return BNXT_TF_RC_PARSE_ERR;
1295                 }
1296                 pid = bnxt_get_vnic_id(phy_port->index);
1297                 pid = rte_cpu_to_be_32(pid);
1298                 memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1299                        &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1300         }
1301
1302         /* Update the act_bitmap with vport */
1303         ULP_BITMAP_SET(prm->act_bitmap.bits, BNXT_ULP_ACTION_BIT_VPORT);
1304         return BNXT_TF_RC_SUCCESS;
1305 }