drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2020 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "ulp_template_db.h"
#include "ulp_template_struct.h"
#include "bnxt_tf_common.h"
#include "ulp_rte_parser.h"
#include "ulp_utils.h"
#include "tfp.h"

/* Inline function to read a 32-bit integer stored in big-endian format */
static inline void ulp_util_field_int_read(uint8_t *buffer,
                                           uint32_t *val)
{
        uint32_t temp_val;

        memcpy(&temp_val, buffer, sizeof(uint32_t));
        *val = rte_be_to_cpu_32(temp_val);
}

/* Inline function to write a 32-bit integer in big-endian format */
static inline void ulp_util_field_int_write(uint8_t *buffer,
                                            uint32_t val)
{
        uint32_t temp_val = rte_cpu_to_be_32(val);

        memcpy(buffer, &temp_val, sizeof(uint32_t));
}
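
/*
 * Illustrative sketch (not part of the driver build): the two helpers
 * above round-trip a value through a big-endian byte buffer, which is
 * how the vlan tag counters in hdr_field are maintained further below.
 * ULP_PARSER_EXAMPLES is a hypothetical guard, not an existing flag.
 */
#ifdef ULP_PARSER_EXAMPLES
static void ulp_example_be_roundtrip(void)
{
        uint8_t buf[sizeof(uint32_t)] = { 0 };
        uint32_t val;

        ulp_util_field_int_write(buf, 1); /* buf = {0x00, 0x00, 0x00, 0x01} */
        ulp_util_field_int_read(buf, &val); /* val == 1 again */
}
#endif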

/* Utility function to skip the void items. */
static inline int32_t
ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
{
        if (!*item)
                return 0;
        if (increment)
                (*item)++;
        while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
                (*item)++;
        if (*item)
                return 1;
        return 0;
}
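
/*
 * Illustrative sketch (not part of the driver build, guarded by the
 * hypothetical ULP_PARSER_EXAMPLES): ulp_rte_item_skip_void() optionally
 * advances to the next item, steps over any VOID items, and returns 0
 * only when the item pointer itself is NULL; callers still check for
 * RTE_FLOW_ITEM_TYPE_END themselves.
 */
#ifdef ULP_PARSER_EXAMPLES
static int32_t ulp_example_skip_void(const struct rte_flow_item *items)
{
        const struct rte_flow_item *item = items;

        /* Land on the first non-VOID item without advancing first. */
        if (!ulp_rte_item_skip_void(&item, 0))
                return BNXT_TF_RC_ERROR;
        /* Move past the current item, then skip any trailing VOIDs. */
        if (!ulp_rte_item_skip_void(&item, 1))
                return BNXT_TF_RC_ERROR;
        return BNXT_TF_RC_SUCCESS;
}
#endif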

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow items into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
                              struct ulp_rte_hdr_bitmap *hdr_bitmap,
                              struct ulp_rte_hdr_field *hdr_field)
{
        const struct rte_flow_item *item = pattern;
        uint32_t field_idx = BNXT_ULP_HDR_FIELD_LAST;
        uint32_t vlan_idx = 0;
        struct bnxt_ulp_rte_hdr_info *hdr_info;

        /* Parse all the items in the pattern */
        while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
                /* get the header information from the ulp_hdr_info table */
                hdr_info = &ulp_hdr_info[item->type];
                if (hdr_info->hdr_type ==
                    BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
                        BNXT_TF_DBG(ERR,
                                    "Truflow parser does not support type %d\n",
                                    item->type);
                        return BNXT_TF_RC_PARSE_ERR;
                } else if (hdr_info->hdr_type ==
                           BNXT_ULP_HDR_TYPE_SUPPORTED) {
                        /* call the registered callback handler */
                        if (hdr_info->proto_hdr_func) {
                                if (hdr_info->proto_hdr_func(item,
                                                             hdr_bitmap,
                                                             hdr_field,
                                                             &field_idx,
                                                             &vlan_idx) !=
                                    BNXT_TF_RC_SUCCESS) {
                                        return BNXT_TF_RC_ERROR;
                                }
                        }
                }
                item++;
        }
        return BNXT_TF_RC_SUCCESS;
}
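
/*
 * Illustrative sketch (not part of the driver build, guarded by the
 * hypothetical ULP_PARSER_EXAMPLES): a caller hands the raw pattern
 * array to the parser, which dispatches each item through ulp_hdr_info[]
 * to the per-protocol handlers below.  The hdr_field array size used
 * here is an assumption for illustration only.
 */
#ifdef ULP_PARSER_EXAMPLES
static int32_t
ulp_example_parse_pattern(const struct rte_flow_item pattern[])
{
        struct ulp_rte_hdr_bitmap hdr_bitmap;
        struct ulp_rte_hdr_field hdr_field[BNXT_ULP_HDR_FIELD_LAST + 32];

        memset(&hdr_bitmap, 0, sizeof(hdr_bitmap));
        memset(hdr_field, 0, sizeof(hdr_field));
        /*
         * e.g. pattern = { ETH, IPV4, UDP, END } sets the O_ETH,
         * O_IPV4/O_L3 and O_UDP/O_L4 bits via the handlers below.
         */
        return bnxt_ulp_rte_parser_hdr_parse(pattern, &hdr_bitmap, hdr_field);
}
#endif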

/*
 * Function to handle the parsing of RTE Flows and placing
 * the RTE flow actions into the ulp structures.
 */
int32_t
bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
                              struct ulp_rte_act_bitmap *act_bitmap,
                              struct ulp_rte_act_prop *act_prop)
{
        const struct rte_flow_action *action_item = actions;
        struct bnxt_ulp_rte_act_info *act_info;

        /* Parse all the actions in the action list */
        while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
                /* get the action information from the ulp_act_info table */
                act_info = &ulp_act_info[action_item->type];
                if (act_info->act_type ==
                    BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
                        BNXT_TF_DBG(ERR,
                                    "Truflow parser does not support act %u\n",
                                    action_item->type);
                        return BNXT_TF_RC_ERROR;
                } else if (act_info->act_type ==
                           BNXT_ULP_ACT_TYPE_SUPPORTED) {
                        /* call the registered callback handler */
                        if (act_info->proto_act_func) {
                                if (act_info->proto_act_func(action_item,
                                                             act_bitmap,
                                                             act_prop) !=
                                    BNXT_TF_RC_SUCCESS) {
                                        return BNXT_TF_RC_ERROR;
                                }
                        }
                }
                action_item++;
        }
        return BNXT_TF_RC_SUCCESS;
}
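
/*
 * Illustrative sketch (not part of the driver build, guarded by the
 * hypothetical ULP_PARSER_EXAMPLES): the action list is walked the same
 * way, dispatching through ulp_act_info[] to the action handlers below.
 */
#ifdef ULP_PARSER_EXAMPLES
static int32_t
ulp_example_parse_actions(const struct rte_flow_action actions[])
{
        struct ulp_rte_act_bitmap act_bitmap;
        struct ulp_rte_act_prop act_prop;

        memset(&act_bitmap, 0, sizeof(act_bitmap));
        memset(&act_prop, 0, sizeof(act_prop));
        /*
         * e.g. actions = { COUNT, DROP, END } sets the COUNT and DROP
         * action bits via the handlers below.
         */
        return bnxt_ulp_rte_parser_act_parse(actions, &act_bitmap, &act_prop);
}
#endif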

/* Utility function to update the SVIF in the header bitmap and fields. */
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_hdr_bitmap *hdr_bitmap,
                        struct ulp_rte_hdr_field *hdr_field,
                        enum rte_flow_item_type proto,
                        uint32_t svif,
                        uint32_t mask)
{
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF)) {
                BNXT_TF_DBG(ERR,
                            "SVIF already set,"
                            " multiple sources not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /* TBD: Check for any mapping errors for svif */
        /* Update the hdr_bitmap with BNXT_ULP_HDR_BIT_SVIF. */
        ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_SVIF);

        if (proto != RTE_FLOW_ITEM_TYPE_PF) {
                memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].spec,
                       &svif, sizeof(svif));
                memcpy(hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].mask,
                       &mask, sizeof(mask));
                hdr_field[BNXT_ULP_HDR_FIELD_SVIF_INDEX].size = sizeof(svif);
        }

        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item PF Header. */
int32_t
ulp_rte_pf_hdr_handler(const struct rte_flow_item *item,
                       struct ulp_rte_hdr_bitmap *hdr_bitmap,
                       struct ulp_rte_hdr_field *hdr_field,
                       uint32_t *field_idx __rte_unused,
                       uint32_t *vlan_idx __rte_unused)
{
        return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
                                       item->type, 0, 0);
}

/* Function to handle the parsing of RTE Flow item VF Header. */
int32_t
ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
                       struct ulp_rte_hdr_bitmap *hdr_bitmap,
                       struct ulp_rte_hdr_field *hdr_field,
                       uint32_t *field_idx __rte_unused,
                       uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_vf *vf_spec, *vf_mask;
        uint32_t svif = 0, mask = 0;

        vf_spec = item->spec;
        vf_mask = item->mask;

        /* Copy the VF id into the SVIF header field. */
        if (vf_spec)
                svif = vf_spec->id;
        if (vf_mask)
                mask = vf_mask->id;

        return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
                                       item->type, svif, mask);
}

/* Function to handle the parsing of RTE Flow item port id Header. */
int32_t
ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
                            struct ulp_rte_hdr_bitmap *hdr_bitmap,
                            struct ulp_rte_hdr_field *hdr_field,
                            uint32_t *field_idx __rte_unused,
                            uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_port_id *port_spec, *port_mask;
        uint32_t svif = 0, mask = 0;

        port_spec = item->spec;
        port_mask = item->mask;

        /* Copy the port id into the SVIF header field. */
        if (port_spec)
                svif = port_spec->id;
        if (port_mask)
                mask = port_mask->id;

        return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
                                       item->type, svif, mask);
}

/* Function to handle the parsing of RTE Flow item phy port Header. */
int32_t
ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
                             struct ulp_rte_hdr_bitmap *hdr_bitmap,
                             struct ulp_rte_hdr_field *hdr_field,
                             uint32_t *field_idx __rte_unused,
                             uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_phy_port *port_spec, *port_mask;
        uint32_t svif = 0, mask = 0;

        port_spec = item->spec;
        port_mask = item->mask;

        /* Copy the phy port index into the SVIF header field. */
        if (port_spec)
                svif = port_spec->index;
        if (port_mask)
                mask = port_mask->index;

        return ulp_rte_parser_svif_set(hdr_bitmap, hdr_field,
                                       item->type, svif, mask);
}

/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
                        struct ulp_rte_hdr_bitmap *hdr_bitmap,
                        struct ulp_rte_hdr_field *hdr_field,
                        uint32_t *field_idx,
                        uint32_t *vlan_idx)
{
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;
        uint64_t set_flag = 0;

        eth_spec = item->spec;
        eth_mask = item->mask;

        /*
         * Copy the rte_flow_item for eth into hdr_field using ethernet
         * header fields
         */
        if (eth_spec) {
                hdr_field[idx].size = sizeof(eth_spec->dst.addr_bytes);
                memcpy(hdr_field[idx++].spec, eth_spec->dst.addr_bytes,
                       sizeof(eth_spec->dst.addr_bytes));
                hdr_field[idx].size = sizeof(eth_spec->src.addr_bytes);
                memcpy(hdr_field[idx++].spec, eth_spec->src.addr_bytes,
                       sizeof(eth_spec->src.addr_bytes));
                hdr_field[idx].size = sizeof(eth_spec->type);
                memcpy(hdr_field[idx++].spec, &eth_spec->type,
                       sizeof(eth_spec->type));
        } else {
                idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
        }

        if (eth_mask) {
                memcpy(hdr_field[mdx++].mask, eth_mask->dst.addr_bytes,
                       sizeof(eth_mask->dst.addr_bytes));
                memcpy(hdr_field[mdx++].mask, eth_mask->src.addr_bytes,
                       sizeof(eth_mask->src.addr_bytes));
                memcpy(hdr_field[mdx++].mask, &eth_mask->type,
                       sizeof(eth_mask->type));
        }
        /* Add number of vlan header elements */
        *field_idx = idx + BNXT_ULP_PROTO_HDR_VLAN_NUM;
        *vlan_idx = idx;

        /* Update the hdr_bitmap with BNXT_ULP_HDR_BIT_I_ETH */
        set_flag = ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);
        if (set_flag)
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);
        else
                ULP_BITMAP_RESET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_ETH);

        /* Update the hdr_bitmap with BNXT_ULP_HDR_BIT_O_ETH */
        ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH);

        return BNXT_TF_RC_SUCCESS;
}
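
/*
 * Note: the first ETH item in a pattern therefore sets only
 * BNXT_ULP_HDR_BIT_O_ETH; a second ETH item (e.g. the inner header of a
 * tunnelled flow) also sets BNXT_ULP_HDR_BIT_I_ETH because O_ETH is
 * already present when it is parsed.
 */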

/* Function to handle the parsing of RTE Flow item Vlan Header. */
int32_t
ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_hdr_bitmap *hdr_bitmap,
                         struct ulp_rte_hdr_field *hdr_field,
                         uint32_t *field_idx __rte_unused,
                         uint32_t *vlan_idx)
{
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        uint32_t idx = *vlan_idx;
        uint32_t mdx = *vlan_idx;
        uint16_t vlan_tag, priority;
        uint32_t outer_vtag_num = 0, inner_vtag_num = 0;
        uint8_t *outer_tag_buffer;
        uint8_t *inner_tag_buffer;

        vlan_spec = item->spec;
        vlan_mask = item->mask;
        outer_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].spec;
        inner_tag_buffer = hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].spec;

        /*
         * Copy the rte_flow_item for vlan into hdr_field using vlan
         * header fields
         */
        if (vlan_spec) {
                vlan_tag = ntohs(vlan_spec->tci);
                priority = htons(vlan_tag >> 13);
                vlan_tag &= 0xfff;
                vlan_tag = htons(vlan_tag);

                hdr_field[idx].size = sizeof(priority);
                memcpy(hdr_field[idx++].spec, &priority, sizeof(priority));
                hdr_field[idx].size = sizeof(vlan_tag);
                memcpy(hdr_field[idx++].spec, &vlan_tag, sizeof(vlan_tag));
                hdr_field[idx].size = sizeof(vlan_spec->inner_type);
                memcpy(hdr_field[idx++].spec, &vlan_spec->inner_type,
                       sizeof(vlan_spec->inner_type));
        } else {
                idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
        }

        if (vlan_mask) {
                vlan_tag = ntohs(vlan_mask->tci);
                priority = htons(vlan_tag >> 13);
                vlan_tag &= 0xfff;
                vlan_tag = htons(vlan_tag);

                memcpy(hdr_field[mdx++].mask, &priority, sizeof(priority));
                memcpy(hdr_field[mdx++].mask, &vlan_tag, sizeof(vlan_tag));
                memcpy(hdr_field[mdx++].mask, &vlan_mask->inner_type,
                       sizeof(vlan_mask->inner_type));
        }
        /* Set the vlan index to the new incremented value */
        *vlan_idx = idx;

        /* Get the outer tag and inner tag counts */
        ulp_util_field_int_read(outer_tag_buffer, &outer_vtag_num);
        ulp_util_field_int_read(inner_tag_buffer, &inner_vtag_num);

        /* Update the hdr_bitmap of the vlans */
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
            !ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN)) {
                /* Set the outer vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OO_VLAN);
                outer_vtag_num++;
                ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
                hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
                                                        sizeof(uint32_t);
        } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_OO_VLAN) &&
                   !ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                     BNXT_ULP_HDR_BIT_OI_VLAN)) {
                /* Set the second outer vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_OI_VLAN);
                outer_vtag_num++;
                ulp_util_field_int_write(outer_tag_buffer, outer_vtag_num);
                hdr_field[BNXT_ULP_HDR_FIELD_O_VTAG_NUM].size =
                                                            sizeof(uint32_t);
        } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_O_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_OO_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_OI_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_I_ETH) &&
                   !ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                     BNXT_ULP_HDR_BIT_IO_VLAN)) {
                /* Set the inner vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_IO_VLAN);
                inner_vtag_num++;
                ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
                hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
                                                            sizeof(uint32_t);
        } else if (ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_O_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_OO_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_OI_VLAN) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_I_ETH) &&
                   ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                    BNXT_ULP_HDR_BIT_IO_VLAN) &&
                   !ULP_BITMAP_ISSET(hdr_bitmap->bits,
                                     BNXT_ULP_HDR_BIT_II_VLAN)) {
                /* Set the second inner vlan bit and update the vlan tag num */
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_II_VLAN);
                inner_vtag_num++;
                ulp_util_field_int_write(inner_tag_buffer, inner_vtag_num);
                hdr_field[BNXT_ULP_HDR_FIELD_I_VTAG_NUM].size =
                                                            sizeof(uint32_t);
        } else {
                BNXT_TF_DBG(ERR, "Error Parsing: Vlan hdr found without eth\n");
                return BNXT_TF_RC_ERROR;
        }
        return BNXT_TF_RC_SUCCESS;
}
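
/*
 * Note: the if/else ladder above assigns vlan items in pattern order:
 * the first tag after the outer ETH sets OO_VLAN, a second outer tag
 * sets OI_VLAN, and once an inner ETH is present the next two tags set
 * IO_VLAN and II_VLAN.  The tag counts are kept big-endian in the
 * O_VTAG_NUM/I_VTAG_NUM fields via the integer helpers at the top of
 * this file.
 */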

/* Function to handle the parsing of RTE Flow item IPV4 Header. */
int32_t
ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_hdr_bitmap *hdr_bitmap,
                         struct ulp_rte_hdr_field *hdr_field,
                         uint32_t *field_idx,
                         uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;

        ipv4_spec = item->spec;
        ipv4_mask = item->mask;

        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
                BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /*
         * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
         * header fields
         */
        if (ipv4_spec) {
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.version_ihl);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.version_ihl,
                       sizeof(ipv4_spec->hdr.version_ihl));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.type_of_service);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.type_of_service,
                       sizeof(ipv4_spec->hdr.type_of_service));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.total_length);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.total_length,
                       sizeof(ipv4_spec->hdr.total_length));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.packet_id);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.packet_id,
                       sizeof(ipv4_spec->hdr.packet_id));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.fragment_offset);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.fragment_offset,
                       sizeof(ipv4_spec->hdr.fragment_offset));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.time_to_live);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.time_to_live,
                       sizeof(ipv4_spec->hdr.time_to_live));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.next_proto_id);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.next_proto_id,
                       sizeof(ipv4_spec->hdr.next_proto_id));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.hdr_checksum);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.hdr_checksum,
                       sizeof(ipv4_spec->hdr.hdr_checksum));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.src_addr);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.src_addr,
                       sizeof(ipv4_spec->hdr.src_addr));
                hdr_field[idx].size = sizeof(ipv4_spec->hdr.dst_addr);
                memcpy(hdr_field[idx++].spec, &ipv4_spec->hdr.dst_addr,
                       sizeof(ipv4_spec->hdr.dst_addr));
        } else {
                idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
        }

        if (ipv4_mask) {
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.version_ihl,
                       sizeof(ipv4_mask->hdr.version_ihl));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.type_of_service,
                       sizeof(ipv4_mask->hdr.type_of_service));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.total_length,
                       sizeof(ipv4_mask->hdr.total_length));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.packet_id,
                       sizeof(ipv4_mask->hdr.packet_id));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.fragment_offset,
                       sizeof(ipv4_mask->hdr.fragment_offset));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.time_to_live,
                       sizeof(ipv4_mask->hdr.time_to_live));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.next_proto_id,
                       sizeof(ipv4_mask->hdr.next_proto_id));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.hdr_checksum,
                       sizeof(ipv4_mask->hdr.hdr_checksum));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.src_addr,
                       sizeof(ipv4_mask->hdr.src_addr));
                memcpy(hdr_field[mdx++].mask, &ipv4_mask->hdr.dst_addr,
                       sizeof(ipv4_mask->hdr.dst_addr));
        }
        *field_idx = idx; /* Number of ipv4 header elements */

        /* Set the ipv4 header bitmap and computed l3 header bitmaps */
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
        } else {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
        }
        return BNXT_TF_RC_SUCCESS;
}
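
/*
 * Note: the first L3 header is classified as outer (O_IPV4/O_L3); any
 * L3 header seen while an outer one is already present is classified as
 * inner (I_IPV4/I_L3), and a third L3 header is rejected above.  The
 * IPv6, UDP and TCP handlers below follow the same outer/inner scheme.
 */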

/* Function to handle the parsing of RTE Flow item IPV6 Header */
int32_t
ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
                         struct ulp_rte_hdr_bitmap *hdr_bitmap,
                         struct ulp_rte_hdr_field *hdr_field,
                         uint32_t *field_idx,
                         uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;

        ipv6_spec = item->spec;
        ipv6_mask = item->mask;

        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3)) {
                BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /*
         * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
         * header fields
         */
        if (ipv6_spec) {
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.vtc_flow);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.vtc_flow,
                       sizeof(ipv6_spec->hdr.vtc_flow));
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.payload_len);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.payload_len,
                       sizeof(ipv6_spec->hdr.payload_len));
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.proto);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.proto,
                       sizeof(ipv6_spec->hdr.proto));
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.hop_limits);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.hop_limits,
                       sizeof(ipv6_spec->hdr.hop_limits));
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.src_addr);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.src_addr,
                       sizeof(ipv6_spec->hdr.src_addr));
                hdr_field[idx].size = sizeof(ipv6_spec->hdr.dst_addr);
                memcpy(hdr_field[idx++].spec, &ipv6_spec->hdr.dst_addr,
                       sizeof(ipv6_spec->hdr.dst_addr));
        } else {
                idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
        }

        if (ipv6_mask) {
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.vtc_flow,
                       sizeof(ipv6_mask->hdr.vtc_flow));
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.payload_len,
                       sizeof(ipv6_mask->hdr.payload_len));
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.proto,
                       sizeof(ipv6_mask->hdr.proto));
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.hop_limits,
                       sizeof(ipv6_mask->hdr.hop_limits));
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.src_addr,
                       sizeof(ipv6_mask->hdr.src_addr));
                memcpy(hdr_field[mdx++].mask, &ipv6_mask->hdr.dst_addr,
                       sizeof(ipv6_mask->hdr.dst_addr));
        }
        *field_idx = idx; /* add number of ipv6 header elements */

        /* Set the ipv6 header bitmap and computed l3 header bitmaps */
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L3);
        } else {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L3);
        }
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item UDP Header. */
int32_t
ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
                        struct ulp_rte_hdr_bitmap *hdr_bitmap,
                        struct ulp_rte_hdr_field *hdr_field,
                        uint32_t *field_idx,
                        uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;

        udp_spec = item->spec;
        udp_mask = item->mask;

        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
                BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /*
         * Copy the rte_flow_item for udp into hdr_field using udp
         * header fields
         */
        if (udp_spec) {
                hdr_field[idx].size = sizeof(udp_spec->hdr.src_port);
                memcpy(hdr_field[idx++].spec, &udp_spec->hdr.src_port,
                       sizeof(udp_spec->hdr.src_port));
                hdr_field[idx].size = sizeof(udp_spec->hdr.dst_port);
                memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dst_port,
                       sizeof(udp_spec->hdr.dst_port));
                hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_len);
                memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_len,
                       sizeof(udp_spec->hdr.dgram_len));
                hdr_field[idx].size = sizeof(udp_spec->hdr.dgram_cksum);
                memcpy(hdr_field[idx++].spec, &udp_spec->hdr.dgram_cksum,
                       sizeof(udp_spec->hdr.dgram_cksum));
        } else {
                idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
        }

        if (udp_mask) {
                memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.src_port,
                       sizeof(udp_mask->hdr.src_port));
                memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dst_port,
                       sizeof(udp_mask->hdr.dst_port));
                memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_len,
                       sizeof(udp_mask->hdr.dgram_len));
                memcpy(hdr_field[mdx++].mask, &udp_mask->hdr.dgram_cksum,
                       sizeof(udp_mask->hdr.dgram_cksum));
        }
        *field_idx = idx; /* Add number of UDP header elements */

        /* Set the udp header bitmap and computed l4 header bitmaps */
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
        } else {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
        }
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item TCP Header. */
int32_t
ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
                        struct ulp_rte_hdr_bitmap *hdr_bitmap,
                        struct ulp_rte_hdr_field *hdr_field,
                        uint32_t *field_idx,
                        uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;

        tcp_spec = item->spec;
        tcp_mask = item->mask;

        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4)) {
                BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
                return BNXT_TF_RC_ERROR;
        }

        /*
         * Copy the rte_flow_item for tcp into hdr_field using tcp
         * header fields
         */
        if (tcp_spec) {
                hdr_field[idx].size = sizeof(tcp_spec->hdr.src_port);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.src_port,
                       sizeof(tcp_spec->hdr.src_port));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.dst_port);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.dst_port,
                       sizeof(tcp_spec->hdr.dst_port));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.sent_seq);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.sent_seq,
                       sizeof(tcp_spec->hdr.sent_seq));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.recv_ack);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.recv_ack,
                       sizeof(tcp_spec->hdr.recv_ack));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.data_off);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.data_off,
                       sizeof(tcp_spec->hdr.data_off));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_flags);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_flags,
                       sizeof(tcp_spec->hdr.tcp_flags));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.rx_win);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.rx_win,
                       sizeof(tcp_spec->hdr.rx_win));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.cksum);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.cksum,
                       sizeof(tcp_spec->hdr.cksum));
                hdr_field[idx].size = sizeof(tcp_spec->hdr.tcp_urp);
                memcpy(hdr_field[idx++].spec, &tcp_spec->hdr.tcp_urp,
                       sizeof(tcp_spec->hdr.tcp_urp));
        } else {
                idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
        }

        if (tcp_mask) {
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.src_port,
                       sizeof(tcp_mask->hdr.src_port));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.dst_port,
                       sizeof(tcp_mask->hdr.dst_port));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.sent_seq,
                       sizeof(tcp_mask->hdr.sent_seq));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.recv_ack,
                       sizeof(tcp_mask->hdr.recv_ack));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.data_off,
                       sizeof(tcp_mask->hdr.data_off));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_flags,
                       sizeof(tcp_mask->hdr.tcp_flags));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.rx_win,
                       sizeof(tcp_mask->hdr.rx_win));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.cksum,
                       sizeof(tcp_mask->hdr.cksum));
                memcpy(hdr_field[mdx++].mask, &tcp_mask->hdr.tcp_urp,
                       sizeof(tcp_mask->hdr.tcp_urp));
        }
        *field_idx = idx; /* add number of TCP header elements */

        /* Set the tcp header bitmap and computed l4 header bitmaps */
        if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
            ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_L4);
        } else {
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
                ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_L4);
        }
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item Vxlan Header. */
int32_t
ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
                          struct ulp_rte_hdr_bitmap *hdr_bitmap,
                          struct ulp_rte_hdr_field *hdr_field,
                          uint32_t *field_idx,
                          uint32_t *vlan_idx __rte_unused)
{
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        uint32_t idx = *field_idx;
        uint32_t mdx = *field_idx;

        vxlan_spec = item->spec;
        vxlan_mask = item->mask;

        /*
         * Copy the rte_flow_item for vxlan into hdr_field using vxlan
         * header fields
         */
        if (vxlan_spec) {
                hdr_field[idx].size = sizeof(vxlan_spec->flags);
                memcpy(hdr_field[idx++].spec, &vxlan_spec->flags,
                       sizeof(vxlan_spec->flags));
                hdr_field[idx].size = sizeof(vxlan_spec->rsvd0);
                memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd0,
                       sizeof(vxlan_spec->rsvd0));
                hdr_field[idx].size = sizeof(vxlan_spec->vni);
                memcpy(hdr_field[idx++].spec, &vxlan_spec->vni,
                       sizeof(vxlan_spec->vni));
                hdr_field[idx].size = sizeof(vxlan_spec->rsvd1);
                memcpy(hdr_field[idx++].spec, &vxlan_spec->rsvd1,
                       sizeof(vxlan_spec->rsvd1));
        } else {
                idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
        }

        if (vxlan_mask) {
                memcpy(hdr_field[mdx++].mask, &vxlan_mask->flags,
                       sizeof(vxlan_mask->flags));
                memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd0,
                       sizeof(vxlan_mask->rsvd0));
                memcpy(hdr_field[mdx++].mask, &vxlan_mask->vni,
                       sizeof(vxlan_mask->vni));
                memcpy(hdr_field[mdx++].mask, &vxlan_mask->rsvd1,
                       sizeof(vxlan_mask->rsvd1));
        }
        *field_idx = idx; /* Add number of vxlan header elements */

        /* Update the hdr_bitmap with vxlan */
        ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow item void Header */
int32_t
ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
                         struct ulp_rte_hdr_bitmap *hdr_bit __rte_unused,
                         struct ulp_rte_hdr_field *hdr_field __rte_unused,
                         uint32_t *field_idx __rte_unused,
                         uint32_t *vlan_idx __rte_unused)
{
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action void Header. */
int32_t
ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
                         struct ulp_rte_act_bitmap *act __rte_unused,
                         struct ulp_rte_act_prop *act_prop __rte_unused)
{
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action Mark Header. */
int32_t
ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
                         struct ulp_rte_act_bitmap *act,
                         struct ulp_rte_act_prop *act_prop)
{
        const struct rte_flow_action_mark *mark;
        uint32_t mark_id = 0;

        mark = action_item->conf;
        if (mark) {
                mark_id = tfp_cpu_to_be_32(mark->id);
                memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
                       &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);

                /* Update the act_bitmap with mark */
                ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
                return BNXT_TF_RC_SUCCESS;
        }
        BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
        return BNXT_TF_RC_ERROR;
}
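
/*
 * Illustrative sketch (not part of the driver build, guarded by the
 * hypothetical ULP_PARSER_EXAMPLES): a MARK action id is stored
 * big-endian at BNXT_ULP_ACT_PROP_IDX_MARK by the handler above.
 */
#ifdef ULP_PARSER_EXAMPLES
static int32_t ulp_example_mark(struct ulp_rte_act_bitmap *act,
                                struct ulp_rte_act_prop *act_prop)
{
        const struct rte_flow_action_mark mark = { .id = 0x1234 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_MARK,
                .conf = &mark,
        };

        /* Sets BNXT_ULP_ACTION_BIT_MARK and copies 0x1234 big-endian. */
        return ulp_rte_mark_act_handler(&action, act, act_prop);
}
#endif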

/* Function to handle the parsing of RTE Flow action RSS Header. */
int32_t
ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
                        struct ulp_rte_act_bitmap *act,
                        struct ulp_rte_act_prop *act_prop __rte_unused)
{
        const struct rte_flow_action_rss *rss;

        rss = action_item->conf;
        if (rss) {
                /* Update the act_bitmap with rss */
                ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_RSS);
                return BNXT_TF_RC_SUCCESS;
        }
        BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
        return BNXT_TF_RC_ERROR;
}

/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
                                struct ulp_rte_act_bitmap *act,
                                struct ulp_rte_act_prop *ap)
{
        const struct rte_flow_action_vxlan_encap *vxlan_encap;
        const struct rte_flow_item *item;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv6 *ipv6_spec;
        struct rte_flow_item_vxlan vxlan_spec;
        uint32_t vlan_num = 0, vlan_size = 0;
        uint32_t ip_size = 0, ip_type = 0;
        uint32_t vxlan_size = 0;
        uint8_t *buff;
        /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
        const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
                                        0x00, 0x40, 0x11};

        vxlan_encap = action_item->conf;
        if (!vxlan_encap) {
                BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
                return BNXT_TF_RC_ERROR;
        }

        item = vxlan_encap->definition;
        if (!item) {
                BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
                return BNXT_TF_RC_ERROR;
        }

        if (!ulp_rte_item_skip_void(&item, 0))
                return BNXT_TF_RC_ERROR;

        /* must have ethernet header */
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
                return BNXT_TF_RC_ERROR;
        }
        eth_spec = item->spec;
        buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
        ulp_encap_buffer_copy(buff,
                              eth_spec->dst.addr_bytes,
                              BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);

        /* Goto the next item */
        if (!ulp_rte_item_skip_void(&item, 1))
                return BNXT_TF_RC_ERROR;

        /* May have vlan header */
        if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
                vlan_num++;
                buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
                ulp_encap_buffer_copy(buff,
                                      item->spec,
                                      sizeof(struct rte_flow_item_vlan));

                if (!ulp_rte_item_skip_void(&item, 1))
                        return BNXT_TF_RC_ERROR;
        }

        /* may have two vlan headers */
        if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
                vlan_num++;
                memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
                       sizeof(struct rte_flow_item_vlan)],
                       item->spec,
                       sizeof(struct rte_flow_item_vlan));
                if (!ulp_rte_item_skip_void(&item, 1))
                        return BNXT_TF_RC_ERROR;
        }
        /* Update the vlan count and size if any vlan headers were seen */
        if (vlan_num) {
                vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
                vlan_num = tfp_cpu_to_be_32(vlan_num);
                memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
                       &vlan_num,
                       sizeof(uint32_t));
                vlan_size = tfp_cpu_to_be_32(vlan_size);
                memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
                       &vlan_size,
                       sizeof(uint32_t));
        }

        /* L3 must be IPv4 or IPv6 */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
                ipv4_spec = item->spec;
                ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;

                /* copy the ipv4 details */
                if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
                                        BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
                        buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
                        ulp_encap_buffer_copy(buff,
                                              def_ipv4_hdr,
                                              BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
                                              BNXT_ULP_ENCAP_IPV4_ID_PROTO);
                } else {
                        const uint8_t *tmp_buff;

                        buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
                        ulp_encap_buffer_copy(buff,
                                              &ipv4_spec->hdr.version_ihl,
                                              BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
                        buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
                                                BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS];
                        tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
                        ulp_encap_buffer_copy(buff,
                                              tmp_buff,
                                              BNXT_ULP_ENCAP_IPV4_ID_PROTO);
                }
                buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
                                        BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
                                        BNXT_ULP_ENCAP_IPV4_ID_PROTO];
                ulp_encap_buffer_copy(buff,
                                      (const uint8_t *)&ipv4_spec->hdr.dst_addr,
                                      BNXT_ULP_ENCAP_IPV4_DEST_IP);

                /* Update the ip size details */
                ip_size = tfp_cpu_to_be_32(ip_size);
                memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
                       &ip_size, sizeof(uint32_t));

                /* update the ip type */
                ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
                memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
                       &ip_type, sizeof(uint32_t));

                if (!ulp_rte_item_skip_void(&item, 1))
                        return BNXT_TF_RC_ERROR;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
                ipv6_spec = item->spec;
                ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;

                /* copy the ipv6 details */
                memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
                       ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);

                /* Update the ip size details */
                ip_size = tfp_cpu_to_be_32(ip_size);
                memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
                       &ip_size, sizeof(uint32_t));

                /* update the ip type */
                ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
                memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
                       &ip_type, sizeof(uint32_t));

                if (!ulp_rte_item_skip_void(&item, 1))
                        return BNXT_TF_RC_ERROR;
        } else {
                BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
                return BNXT_TF_RC_ERROR;
        }

        /* L4 is UDP */
        if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
                BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
                return BNXT_TF_RC_ERROR;
        }
        /* copy the udp details */
        ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
                              item->spec, BNXT_ULP_ENCAP_UDP_SIZE);

        if (!ulp_rte_item_skip_void(&item, 1))
                return BNXT_TF_RC_ERROR;

        /* Finally VXLAN */
        if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
                BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
                return BNXT_TF_RC_ERROR;
        }
        vxlan_size = sizeof(struct rte_flow_item_vxlan);
        /* copy the vxlan details */
        memcpy(&vxlan_spec, item->spec, vxlan_size);
        vxlan_spec.flags = 0x08;
        ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
                              (const uint8_t *)&vxlan_spec,
                              vxlan_size);
        vxlan_size = tfp_cpu_to_be_32(vxlan_size);
        memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
               &vxlan_size, sizeof(uint32_t));

        /* Update the act_bitmap with vxlan encap */
        ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
        return BNXT_TF_RC_SUCCESS;
}
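
/*
 * Illustrative sketch (not part of the driver build, guarded by the
 * hypothetical ULP_PARSER_EXAMPLES): after VOID items are skipped, the
 * encap definition walked above must read ETH / [VLAN [/ VLAN]] /
 * IPV4-or-IPV6 / UDP / VXLAN / END.  The spec pointers are omitted here
 * for brevity, but the handler expects valid spec data on each item.
 */
#ifdef ULP_PARSER_EXAMPLES
static const struct rte_flow_item ulp_example_encap_def[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif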

/* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
int32_t
ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
                                __rte_unused,
                                struct ulp_rte_act_bitmap *act,
                                struct ulp_rte_act_prop *act_prop __rte_unused)
{
        /* Update the act_bitmap with vxlan decap */
        ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action drop Header. */
int32_t
ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
                         struct ulp_rte_act_bitmap *act,
                         struct ulp_rte_act_prop *act_prop __rte_unused)
{
        /* Update the act_bitmap with drop */
        ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_DROP);
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action count. */
int32_t
ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
                          struct ulp_rte_act_bitmap *act,
                          struct ulp_rte_act_prop *act_prop)
{
        const struct rte_flow_action_count *act_count;

        act_count = action_item->conf;
        if (act_count) {
                if (act_count->shared) {
                        BNXT_TF_DBG(ERR,
                                    "Parse Error:Shared count not supported\n");
                        return BNXT_TF_RC_PARSE_ERR;
                }
                memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
                       &act_count->id,
                       BNXT_ULP_ACT_PROP_SZ_COUNT);
        }

        /* Update the act_bitmap with count */
        ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_COUNT);
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action PF. */
int32_t
ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
                       struct ulp_rte_act_bitmap *act,
                       struct ulp_rte_act_prop *act_prop)
{
        uint8_t *svif_buf;
        uint8_t *vnic_buffer;
        uint32_t svif;

        /* Update the act_bitmap with the vnic bit */
        ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);

        /* copy the PF of the current device into VNIC Property */
        svif_buf = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
        ulp_util_field_int_read(svif_buf, &svif);
        vnic_buffer = &act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC];
        ulp_util_field_int_write(vnic_buffer, svif);

        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action VF. */
int32_t
ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
                       struct ulp_rte_act_bitmap *act,
                       struct ulp_rte_act_prop *act_prop)
{
        const struct rte_flow_action_vf *vf_action;

        vf_action = action_item->conf;
        if (vf_action) {
                if (vf_action->original) {
                        BNXT_TF_DBG(ERR,
                                    "Parse Error:VF Original not supported\n");
                        return BNXT_TF_RC_PARSE_ERR;
                }
                /* TBD: Update the computed VNIC using VF conversion */
                memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
                       &vf_action->id,
                       BNXT_ULP_ACT_PROP_SZ_VNIC);
        }

        /* Update the act_bitmap with the vnic bit */
        ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action port_id. */
int32_t
ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
                            struct ulp_rte_act_bitmap *act,
                            struct ulp_rte_act_prop *act_prop)
{
        const struct rte_flow_action_port_id *port_id;

        port_id = act_item->conf;
        if (port_id) {
                if (port_id->original) {
                        BNXT_TF_DBG(ERR,
                                    "ParseErr:Portid Original not supported\n");
                        return BNXT_TF_RC_PARSE_ERR;
                }
                /* TBD: Update the computed VNIC using port conversion */
                memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
                       &port_id->id,
                       BNXT_ULP_ACT_PROP_SZ_VNIC);
        }

        /* Update the act_bitmap with the vnic bit */
        ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VNIC);
        return BNXT_TF_RC_SUCCESS;
}

/* Function to handle the parsing of RTE Flow action phy_port. */
int32_t
ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
                             struct ulp_rte_act_bitmap *act,
                             struct ulp_rte_act_prop *act_prop)
{
        const struct rte_flow_action_phy_port *phy_port;

        phy_port = action_item->conf;
        if (phy_port) {
                if (phy_port->original) {
                        BNXT_TF_DBG(ERR,
                                    "Parse Err:Port Original not supported\n");
                        return BNXT_TF_RC_PARSE_ERR;
                }
                memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
                       &phy_port->index,
                       BNXT_ULP_ACT_PROP_SZ_VPORT);
        }

        /* Update the act_bitmap with the vport bit */
        ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VPORT);
        return BNXT_TF_RC_SUCCESS;
}