net/bnxt: change port config for full offload
drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2014-2020 Broadcom
3  * All rights reserved.
4  */
5
6 #include "bnxt.h"
7 #include "ulp_template_db_enum.h"
8 #include "ulp_template_struct.h"
9 #include "bnxt_tf_common.h"
10 #include "ulp_rte_parser.h"
11 #include "ulp_utils.h"
12 #include "tfp.h"
13 #include "ulp_port_db.h"
14
15 /* Utility function to skip the void items. */
16 static inline int32_t
17 ulp_rte_item_skip_void(const struct rte_flow_item **item, uint32_t increment)
18 {
19         if (!*item)
20                 return 0;
21         if (increment)
22                 (*item)++;
23         while ((*item) && (*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
24                 (*item)++;
25         if (*item)
26                 return 1;
27         return 0;
28 }
29
30 /* Utility function to update the field_bitmap */
31 static void
32 ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
33                                    uint32_t idx)
34 {
35         struct ulp_rte_hdr_field *field;
36
37         field = &params->hdr_field[idx];
38         if (ulp_bitmap_notzero(field->mask, field->size)) {
39                 ULP_INDEX_BITMAP_SET(params->fld_bitmap.bits, idx);
40                 /* Not exact match */
41                 if (!ulp_bitmap_is_ones(field->mask, field->size))
42                         ULP_BITMAP_SET(params->fld_bitmap.bits,
43                                        BNXT_ULP_MATCH_TYPE_BITMASK_WM);
44         } else {
45                 ULP_INDEX_BITMAP_RESET(params->fld_bitmap.bits, idx);
46         }
47 }
48
49 /* Utility function to copy field spec items */
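/* The next field is returned so that consecutive spec copies can be chained. */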
50 static struct ulp_rte_hdr_field *
51 ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
52                         const void *buffer,
53                         uint32_t size)
54 {
55         field->size = size;
56         memcpy(field->spec, buffer, field->size);
57         field++;
58         return field;
59 }
60
61 /* Utility function to copy field mask items */
62 static void
63 ulp_rte_prsr_mask_copy(struct ulp_rte_parser_params *params,
64                        uint32_t *idx,
65                        const void *buffer,
66                        uint32_t size)
67 {
68         struct ulp_rte_hdr_field *field = &params->hdr_field[*idx];
69
70         memcpy(field->mask, buffer, size);
71         ulp_rte_parser_field_bitmap_update(params, *idx);
72         *idx = *idx + 1;
73 }
74
75 /*
76  * Function to handle the parsing of RTE Flows and placing
77  * the RTE flow items into the ulp structures.
78  */
79 int32_t
80 bnxt_ulp_rte_parser_hdr_parse(const struct rte_flow_item pattern[],
81                               struct ulp_rte_parser_params *params)
82 {
83         const struct rte_flow_item *item = pattern;
84         struct bnxt_ulp_rte_hdr_info *hdr_info;
85
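        /* Reserve the leading hdr_field entries for the implicit SVIF match */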
86         params->field_idx = BNXT_ULP_PROTO_HDR_SVIF_NUM;
87
88         /* Set the computed flags for no vlan tags before parsing */
89         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 1);
90         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 1);
91
92         /* Parse all the items in the pattern */
93         while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
94                 /* get the header information from the ulp_hdr_info table */
95                 hdr_info = &ulp_hdr_info[item->type];
96                 if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
97                         BNXT_TF_DBG(ERR,
98                                     "Truflow parser does not support type %d\n",
99                                     item->type);
100                         return BNXT_TF_RC_PARSE_ERR;
101                 } else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
102                         /* call the registered callback handler */
103                         if (hdr_info->proto_hdr_func) {
104                                 if (hdr_info->proto_hdr_func(item, params) !=
105                                     BNXT_TF_RC_SUCCESS) {
106                                         return BNXT_TF_RC_ERROR;
107                                 }
108                         }
109                 }
110                 item++;
111         }
112         /* update the implied SVIF */
113         return ulp_rte_parser_implicit_match_port_process(params);
114 }
115
116 /*
117  * Function to handle the parsing of RTE Flows and placing
118  * the RTE flow actions into the ulp structures.
119  */
120 int32_t
121 bnxt_ulp_rte_parser_act_parse(const struct rte_flow_action actions[],
122                               struct ulp_rte_parser_params *params)
123 {
124         const struct rte_flow_action *action_item = actions;
125         struct bnxt_ulp_rte_act_info *hdr_info;
126
127         /* Parse all the actions in the action list */
128         while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
129                 /* get the action information from the ulp_act_info table */
130                 hdr_info = &ulp_act_info[action_item->type];
131                 if (hdr_info->act_type ==
132                     BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
133                         BNXT_TF_DBG(ERR,
134                                     "Truflow parser does not support act %u\n",
135                                     action_item->type);
136                         return BNXT_TF_RC_ERROR;
137                 } else if (hdr_info->act_type ==
138                     BNXT_ULP_ACT_TYPE_SUPPORTED) {
139                         /* call the registered callback handler */
140                         if (hdr_info->proto_act_func) {
141                                 if (hdr_info->proto_act_func(action_item,
142                                                              params) !=
143                                     BNXT_TF_RC_SUCCESS) {
144                                         return BNXT_TF_RC_ERROR;
145                                 }
146                         }
147                 }
148                 action_item++;
149         }
150         /* update the implied port details */
151         ulp_rte_parser_implicit_act_port_process(params);
152         return BNXT_TF_RC_SUCCESS;
153 }
154
155 /*
156  * Function to handle the post processing of the parsing details
157  */
158 int32_t
159 bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
160 {
161         enum bnxt_ulp_direction_type dir;
162         enum bnxt_ulp_intf_type match_port_type, act_port_type;
163         uint32_t act_port_set;
164
165         /* Get the computed details */
166         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
167         match_port_type = ULP_COMP_FLD_IDX_RD(params,
168                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
169         act_port_type = ULP_COMP_FLD_IDX_RD(params,
170                                             BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
171         act_port_set = ULP_COMP_FLD_IDX_RD(params,
172                                            BNXT_ULP_CF_IDX_ACT_PORT_IS_SET);
173
174         /* set the flow direction in the proto and action header */
175         if (dir == BNXT_ULP_DIR_EGRESS) {
176                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
177                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
178                 ULP_BITMAP_SET(params->act_bitmap.bits,
179                                BNXT_ULP_FLOW_DIR_BITMASK_EGR);
180         }
181
182         /* calculate the VF to VF flag */
183         if (act_port_set && act_port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
184             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP)
185                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_VF_TO_VF, 1);
186
187         /* TBD: Handle the flow rejection scenarios */
188         return 0;
189 }
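
/*
 * Illustrative usage (simplified): the flow create path is expected to run
 * the header parser, then the action parser and finally the post processing
 * step, for example:
 *
 *     if (bnxt_ulp_rte_parser_hdr_parse(pattern, &params) == BNXT_TF_RC_SUCCESS &&
 *         bnxt_ulp_rte_parser_act_parse(actions, &params) == BNXT_TF_RC_SUCCESS)
 *             bnxt_ulp_rte_parser_post_process(&params);
 */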
190
191 /*
192  * Function to compute the flow direction based on the match port details
193  */
194 static void
195 bnxt_ulp_rte_parser_direction_compute(struct ulp_rte_parser_params *params)
196 {
197         enum bnxt_ulp_intf_type match_port_type;
198
199         /* Get the match port type */
200         match_port_type = ULP_COMP_FLD_IDX_RD(params,
201                                               BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
202
203         /* If ingress flow and match port is a VF rep, then dir is egress */
204         if ((params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS) &&
205             match_port_type == BNXT_ULP_INTF_TYPE_VF_REP) {
206                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
207                                     BNXT_ULP_DIR_EGRESS);
208         } else {
209                 /* Assign the input direction */
210                 if (params->dir_attr & BNXT_ULP_FLOW_ATTR_INGRESS)
211                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
212                                             BNXT_ULP_DIR_INGRESS);
213                 else
214                         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_DIRECTION,
215                                             BNXT_ULP_DIR_EGRESS);
216         }
217 }
218
219 /* Function to set the SVIF match field from the given interface index. */
220 static int32_t
221 ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
222                         uint32_t ifindex,
223                         uint16_t mask)
224 {
225         uint16_t svif;
226         enum bnxt_ulp_direction_type dir;
227         struct ulp_rte_hdr_field *hdr_field;
228         enum bnxt_ulp_svif_type svif_type;
229         enum bnxt_ulp_intf_type port_type;
230
231         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
232             BNXT_ULP_INVALID_SVIF_VAL) {
233                 BNXT_TF_DBG(ERR,
234                             "SVIF already set, multiple sources not supported\n");
235                 return BNXT_TF_RC_ERROR;
236         }
237
238         /* Get port type details */
239         port_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
240         if (port_type == BNXT_ULP_INTF_TYPE_INVALID) {
241                 BNXT_TF_DBG(ERR, "Invalid port type\n");
242                 return BNXT_TF_RC_ERROR;
243         }
244
245         /* Update the match port type */
246         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE, port_type);
247
248         /* compute the direction */
249         bnxt_ulp_rte_parser_direction_compute(params);
250
251         /* Get the computed direction */
252         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
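        /*
         * Ingress flows match the phy port SVIF; egress flows match the
         * VF or driver function SVIF.
         */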
253         if (dir == BNXT_ULP_DIR_INGRESS) {
254                 svif_type = BNXT_ULP_PHY_PORT_SVIF;
255         } else {
256                 if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
257                         svif_type = BNXT_ULP_VF_FUNC_SVIF;
258                 else
259                         svif_type = BNXT_ULP_DRV_FUNC_SVIF;
260         }
261         ulp_port_db_svif_get(params->ulp_ctx, ifindex, svif_type,
262                              &svif);
263         svif = rte_cpu_to_be_16(svif);
264         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
265         memcpy(hdr_field->spec, &svif, sizeof(svif));
266         memcpy(hdr_field->mask, &mask, sizeof(mask));
267         hdr_field->size = sizeof(svif);
268         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
269                             rte_be_to_cpu_16(svif));
270         return BNXT_TF_RC_SUCCESS;
271 }
272
273 /* Function to handle the implicit match port when the SVIF is not set */
274 int32_t
275 ulp_rte_parser_implicit_match_port_process(struct ulp_rte_parser_params *params)
276 {
277         uint16_t port_id = 0;
278         uint16_t svif_mask = 0xFFFF;
279         uint32_t ifindex;
280         int32_t rc = BNXT_TF_RC_ERROR;
281
282         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_SVIF_FLAG) !=
283             BNXT_ULP_INVALID_SVIF_VAL)
284                 return BNXT_TF_RC_SUCCESS;
285
286         /* SVIF not set. So get the port id */
287         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
288
289         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
290                                               port_id,
291                                               &ifindex)) {
292                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
293                 return rc;
294         }
295
296         /* Update the SVIF details */
297         rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
298         return rc;
299 }
300
301 /* Function to handle the implicit action port id */
302 int32_t
303 ulp_rte_parser_implicit_act_port_process(struct ulp_rte_parser_params *params)
304 {
305         struct rte_flow_action action_item = {0};
306         struct rte_flow_action_port_id port_id = {0};
307
308         /* Read the action port set bit */
309         if (ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET)) {
310                 /* Already set, so just exit */
311                 return BNXT_TF_RC_SUCCESS;
312         }
313         port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
314         action_item.conf = &port_id;
315
316         /* Update the action port based on incoming port */
317         ulp_rte_port_id_act_handler(&action_item, params);
318
319         /* Reset the action port set bit */
320         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
321         return BNXT_TF_RC_SUCCESS;
322 }
323
324 /* Function to handle the parsing of RTE Flow item PF Header. */
325 int32_t
326 ulp_rte_pf_hdr_handler(const struct rte_flow_item *item __rte_unused,
327                        struct ulp_rte_parser_params *params)
328 {
329         uint16_t port_id = 0;
330         uint16_t svif_mask = 0xFFFF;
331         uint32_t ifindex;
332
333         /* Get the implicit port id */
334         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
335
336         /* perform the conversion from dpdk port to bnxt ifindex */
337         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
338                                               port_id,
339                                               &ifindex)) {
340                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
341                 return BNXT_TF_RC_ERROR;
342         }
343
344         /* Update the SVIF details */
345         return  ulp_rte_parser_svif_set(params, ifindex, svif_mask);
346 }
347
348 /* Function to handle the parsing of RTE Flow item VF Header. */
349 int32_t
350 ulp_rte_vf_hdr_handler(const struct rte_flow_item *item,
351                        struct ulp_rte_parser_params *params)
352 {
353         const struct rte_flow_item_vf *vf_spec = item->spec;
354         const struct rte_flow_item_vf *vf_mask = item->mask;
355         uint16_t mask = 0;
356         uint32_t ifindex;
357         int32_t rc = BNXT_TF_RC_PARSE_ERR;
358
359         /* Get VF rte_flow_item for Port details */
360         if (!vf_spec) {
361                 BNXT_TF_DBG(ERR, "ParseErr:VF id is not valid\n");
362                 return rc;
363         }
364         if (!vf_mask) {
365                 BNXT_TF_DBG(ERR, "ParseErr:VF mask is not valid\n");
366                 return rc;
367         }
368         mask = vf_mask->id;
369
370         /* perform the conversion from VF Func id to bnxt ifindex */
371         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx,
372                                                  vf_spec->id,
373                                                  &ifindex)) {
374                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
375                 return rc;
376         }
377         /* Update the SVIF details */
378         return ulp_rte_parser_svif_set(params, ifindex, mask);
379 }
380
381 /* Function to handle the parsing of RTE Flow item port id Header. */
382 int32_t
383 ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
384                             struct ulp_rte_parser_params *params)
385 {
386         const struct rte_flow_item_port_id *port_spec = item->spec;
387         const struct rte_flow_item_port_id *port_mask = item->mask;
388         uint16_t mask = 0;
389         int32_t rc = BNXT_TF_RC_PARSE_ERR;
390         uint32_t ifindex;
391
392         if (!port_spec) {
393                 BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
394                 return rc;
395         }
396         if (!port_mask) {
397                 BNXT_TF_DBG(ERR, "ParseErr:Port id mask is not valid\n");
398                 return rc;
399         }
400         mask = port_mask->id;
401
402         /* perform the conversion from dpdk port to bnxt ifindex */
403         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
404                                               port_spec->id,
405                                               &ifindex)) {
406                 BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
407                 return rc;
408         }
409         /* Update the SVIF details */
410         return ulp_rte_parser_svif_set(params, ifindex, mask);
411 }
412
413 /* Function to handle the parsing of RTE Flow item phy port Header. */
414 int32_t
415 ulp_rte_phy_port_hdr_handler(const struct rte_flow_item *item,
416                              struct ulp_rte_parser_params *params)
417 {
418         const struct rte_flow_item_phy_port *port_spec = item->spec;
419         const struct rte_flow_item_phy_port *port_mask = item->mask;
420         uint16_t mask = 0;
421         int32_t rc = BNXT_TF_RC_ERROR;
422         uint16_t svif;
423         enum bnxt_ulp_direction_type dir;
424         struct ulp_rte_hdr_field *hdr_field;
425
426         /* Copy the rte_flow_item for phy port into hdr_field */
427         if (!port_spec) {
428                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port id is not valid\n");
429                 return rc;
430         }
431         if (!port_mask) {
432                 BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
433                 return rc;
434         }
435         mask = port_mask->index;
436
437         /* Update the match port type */
438         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_MATCH_PORT_TYPE,
439                             BNXT_ULP_INTF_TYPE_PHY_PORT);
440
441         /* Compute the Hw direction */
442         bnxt_ulp_rte_parser_direction_compute(params);
443
444         /* Direction validation */
445         dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
446         if (dir == BNXT_ULP_DIR_EGRESS) {
447                 BNXT_TF_DBG(ERR,
448                             "Parse Err:Phy ports are valid only for ingress\n");
449                 return BNXT_TF_RC_PARSE_ERR;
450         }
451
452         /* Get the physical port details from port db */
453         rc = ulp_port_db_phy_port_svif_get(params->ulp_ctx, port_spec->index,
454                                            &svif);
455         if (rc) {
456                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
457                 return BNXT_TF_RC_PARSE_ERR;
458         }
459
460         /* Update the SVIF details */
461         svif = rte_cpu_to_be_16(svif);
462         hdr_field = &params->hdr_field[BNXT_ULP_PROTO_HDR_FIELD_SVIF_IDX];
463         memcpy(hdr_field->spec, &svif, sizeof(svif));
464         memcpy(hdr_field->mask, &mask, sizeof(mask));
465         hdr_field->size = sizeof(svif);
466         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_SVIF_FLAG,
467                             rte_be_to_cpu_16(svif));
468         return BNXT_TF_RC_SUCCESS;
469 }
470
471 /* Function to handle the parsing of RTE Flow item Ethernet Header. */
472 int32_t
473 ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
474                         struct ulp_rte_parser_params *params)
475 {
476         const struct rte_flow_item_eth *eth_spec = item->spec;
477         const struct rte_flow_item_eth *eth_mask = item->mask;
478         struct ulp_rte_hdr_field *field;
479         uint32_t idx = params->field_idx;
480         uint64_t set_flag = 0;
481         uint32_t size;
482
483         /*
484          * Copy the rte_flow_item for eth into hdr_field using ethernet
485          * header fields
486          */
487         if (eth_spec) {
488                 size = sizeof(eth_spec->dst.addr_bytes);
489                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
490                                                 eth_spec->dst.addr_bytes,
491                                                 size);
492                 size = sizeof(eth_spec->src.addr_bytes);
493                 field = ulp_rte_parser_fld_copy(field,
494                                                 eth_spec->src.addr_bytes,
495                                                 size);
496                 field = ulp_rte_parser_fld_copy(field,
497                                                 &eth_spec->type,
498                                                 sizeof(eth_spec->type));
499         }
500         if (eth_mask) {
501                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->dst.addr_bytes,
502                                        sizeof(eth_mask->dst.addr_bytes));
503                 ulp_rte_prsr_mask_copy(params, &idx, eth_mask->src.addr_bytes,
504                                        sizeof(eth_mask->src.addr_bytes));
505                 ulp_rte_prsr_mask_copy(params, &idx, &eth_mask->type,
506                                        sizeof(eth_mask->type));
507         }
508         /* Add number of eth header elements and reserve slots for vlan headers */
509         params->field_idx += BNXT_ULP_PROTO_HDR_ETH_NUM;
510         params->vlan_idx = params->field_idx;
511         params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
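        /*
         * The reserved vlan slots are filled by ulp_rte_vlan_hdr_handler
         * using params->vlan_idx.
         */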
512
513         /* Update the hdr_bitmap with BNXT_ULP_HDR_BIT_I_ETH */
514         set_flag = ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
515                                     BNXT_ULP_HDR_BIT_O_ETH);
516         if (set_flag)
517                 ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
518         else
519                 ULP_BITMAP_RESET(params->hdr_bitmap.bits,
520                                  BNXT_ULP_HDR_BIT_I_ETH);
521
522         /* Update the hdr_bitmap with BNXT_ULP_HDR_BIT_O_ETH */
523         ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
524
525         return BNXT_TF_RC_SUCCESS;
526 }
527
528 /* Function to handle the parsing of RTE Flow item Vlan Header. */
529 int32_t
530 ulp_rte_vlan_hdr_handler(const struct rte_flow_item *item,
531                          struct ulp_rte_parser_params *params)
532 {
533         const struct rte_flow_item_vlan *vlan_spec = item->spec;
534         const struct rte_flow_item_vlan *vlan_mask = item->mask;
535         struct ulp_rte_hdr_field *field;
536         struct ulp_rte_hdr_bitmap       *hdr_bit;
537         uint32_t idx = params->vlan_idx;
538         uint16_t vlan_tag, priority;
539         uint32_t outer_vtag_num;
540         uint32_t inner_vtag_num;
541
542         /*
543          * Copy the rte_flow_item for vlan into hdr_field using Vlan
544          * header fields
545          */
546         if (vlan_spec) {
547                 vlan_tag = ntohs(vlan_spec->tci);
548                 priority = htons(vlan_tag >> 13);
549                 vlan_tag &= 0xfff;
550                 vlan_tag = htons(vlan_tag);
551
552                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
553                                                 &priority,
554                                                 sizeof(priority));
555                 field = ulp_rte_parser_fld_copy(field,
556                                                 &vlan_tag,
557                                                 sizeof(vlan_tag));
558                 field = ulp_rte_parser_fld_copy(field,
559                                                 &vlan_spec->inner_type,
560                                                 sizeof(vlan_spec->inner_type));
561         }
562
563         if (vlan_mask) {
564                 vlan_tag = ntohs(vlan_mask->tci);
565                 priority = htons(vlan_tag >> 13);
566                 vlan_tag &= 0xfff;
567                 vlan_tag = htons(vlan_tag);
568
569                 field = &params->hdr_field[idx];
570                 memcpy(field->mask, &priority, field->size);
571                 field++;
572                 memcpy(field->mask, &vlan_tag, field->size);
573                 field++;
574                 memcpy(field->mask, &vlan_mask->inner_type, field->size);
575         }
576         /* Advance the vlan index past the fields of this vlan header */
577         params->vlan_idx += BNXT_ULP_PROTO_HDR_S_VLAN_NUM;
578
579         /* Get the outer tag and inner tag counts */
580         outer_vtag_num = ULP_COMP_FLD_IDX_RD(params,
581                                              BNXT_ULP_CF_IDX_O_VTAG_NUM);
582         inner_vtag_num = ULP_COMP_FLD_IDX_RD(params,
583                                              BNXT_ULP_CF_IDX_I_VTAG_NUM);
584
585         /* Update the hdr_bitmap of the vlans */
586         hdr_bit = &params->hdr_bitmap;
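        /*
         * The first and second tags after the outer eth header map to the
         * OO/OI vlan bits; the first and second tags after the inner eth
         * header map to the IO/II vlan bits.
         */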
587         if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
588             !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
589             !outer_vtag_num) {
590                 /* Update the vlan tag num */
591                 outer_vtag_num++;
592                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
593                                     outer_vtag_num);
594                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_NO_VTAG, 0);
595                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
596                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
597                                BNXT_ULP_HDR_BIT_OO_VLAN);
598         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
599                    !ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
600                    outer_vtag_num == 1) {
601                 /* update the vlan tag num */
602                 outer_vtag_num++;
603                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_VTAG_NUM,
604                                     outer_vtag_num);
605                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_TWO_VTAGS, 1);
606                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
607                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
608                                BNXT_ULP_HDR_BIT_OI_VLAN);
609         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
610                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
611                    !inner_vtag_num) {
612                 /* update the vlan tag num */
613                 inner_vtag_num++;
614                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
615                                     inner_vtag_num);
616                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_NO_VTAG, 0);
617                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
618                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
619                                BNXT_ULP_HDR_BIT_IO_VLAN);
620         } else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
621                    ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
622                    inner_vtag_num == 1) {
623                 /* update the vlan tag num */
624                 inner_vtag_num++;
625                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_VTAG_NUM,
626                                     inner_vtag_num);
627                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_TWO_VTAGS, 1);
628                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
629                 ULP_BITMAP_SET(params->hdr_bitmap.bits,
630                                BNXT_ULP_HDR_BIT_II_VLAN);
631         } else {
632                 BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
633                 return BNXT_TF_RC_ERROR;
634         }
635         return BNXT_TF_RC_SUCCESS;
636 }
637
638 /* Function to handle the parsing of RTE Flow item IPV4 Header. */
639 int32_t
640 ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
641                          struct ulp_rte_parser_params *params)
642 {
643         const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
644         const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
645         struct ulp_rte_hdr_field *field;
646         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
647         uint32_t idx = params->field_idx;
648         uint32_t size;
649         uint32_t inner_l3, outer_l3;
650
651         inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
652         if (inner_l3) {
653                 BNXT_TF_DBG(ERR, "Parse Error:Third L3 header not supported\n");
654                 return BNXT_TF_RC_ERROR;
655         }
656
657         /*
658          * Copy the rte_flow_item for ipv4 into hdr_field using ipv4
659          * header fields
660          */
661         if (ipv4_spec) {
662                 size = sizeof(ipv4_spec->hdr.version_ihl);
663                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
664                                                 &ipv4_spec->hdr.version_ihl,
665                                                 size);
666                 size = sizeof(ipv4_spec->hdr.type_of_service);
667                 field = ulp_rte_parser_fld_copy(field,
668                                                 &ipv4_spec->hdr.type_of_service,
669                                                 size);
670                 size = sizeof(ipv4_spec->hdr.total_length);
671                 field = ulp_rte_parser_fld_copy(field,
672                                                 &ipv4_spec->hdr.total_length,
673                                                 size);
674                 size = sizeof(ipv4_spec->hdr.packet_id);
675                 field = ulp_rte_parser_fld_copy(field,
676                                                 &ipv4_spec->hdr.packet_id,
677                                                 size);
678                 size = sizeof(ipv4_spec->hdr.fragment_offset);
679                 field = ulp_rte_parser_fld_copy(field,
680                                                 &ipv4_spec->hdr.fragment_offset,
681                                                 size);
682                 size = sizeof(ipv4_spec->hdr.time_to_live);
683                 field = ulp_rte_parser_fld_copy(field,
684                                                 &ipv4_spec->hdr.time_to_live,
685                                                 size);
686                 size = sizeof(ipv4_spec->hdr.next_proto_id);
687                 field = ulp_rte_parser_fld_copy(field,
688                                                 &ipv4_spec->hdr.next_proto_id,
689                                                 size);
690                 size = sizeof(ipv4_spec->hdr.hdr_checksum);
691                 field = ulp_rte_parser_fld_copy(field,
692                                                 &ipv4_spec->hdr.hdr_checksum,
693                                                 size);
694                 size = sizeof(ipv4_spec->hdr.src_addr);
695                 field = ulp_rte_parser_fld_copy(field,
696                                                 &ipv4_spec->hdr.src_addr,
697                                                 size);
698                 size = sizeof(ipv4_spec->hdr.dst_addr);
699                 field = ulp_rte_parser_fld_copy(field,
700                                                 &ipv4_spec->hdr.dst_addr,
701                                                 size);
702         }
703         if (ipv4_mask) {
704                 ulp_rte_prsr_mask_copy(params, &idx,
705                                        &ipv4_mask->hdr.version_ihl,
706                                        sizeof(ipv4_mask->hdr.version_ihl));
707                 ulp_rte_prsr_mask_copy(params, &idx,
708                                        &ipv4_mask->hdr.type_of_service,
709                                        sizeof(ipv4_mask->hdr.type_of_service));
710                 ulp_rte_prsr_mask_copy(params, &idx,
711                                        &ipv4_mask->hdr.total_length,
712                                        sizeof(ipv4_mask->hdr.total_length));
713                 ulp_rte_prsr_mask_copy(params, &idx,
714                                        &ipv4_mask->hdr.packet_id,
715                                        sizeof(ipv4_mask->hdr.packet_id));
716                 ulp_rte_prsr_mask_copy(params, &idx,
717                                        &ipv4_mask->hdr.fragment_offset,
718                                        sizeof(ipv4_mask->hdr.fragment_offset));
719                 ulp_rte_prsr_mask_copy(params, &idx,
720                                        &ipv4_mask->hdr.time_to_live,
721                                        sizeof(ipv4_mask->hdr.time_to_live));
722                 ulp_rte_prsr_mask_copy(params, &idx,
723                                        &ipv4_mask->hdr.next_proto_id,
724                                        sizeof(ipv4_mask->hdr.next_proto_id));
725                 ulp_rte_prsr_mask_copy(params, &idx,
726                                        &ipv4_mask->hdr.hdr_checksum,
727                                        sizeof(ipv4_mask->hdr.hdr_checksum));
728                 ulp_rte_prsr_mask_copy(params, &idx,
729                                        &ipv4_mask->hdr.src_addr,
730                                        sizeof(ipv4_mask->hdr.src_addr));
731                 ulp_rte_prsr_mask_copy(params, &idx,
732                                        &ipv4_mask->hdr.dst_addr,
733                                        sizeof(ipv4_mask->hdr.dst_addr));
734         }
735         /* Add the number of ipv4 header elements */
736         params->field_idx += BNXT_ULP_PROTO_HDR_IPV4_NUM;
737
738         /* Set the ipv4 header bitmap and computed l3 header bitmaps */
739         outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
740         if (outer_l3 ||
741             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
742             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
743                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV4);
744                 inner_l3++;
745                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, inner_l3);
746         } else {
747                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
748                 outer_l3++;
749                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, outer_l3);
750         }
751         return BNXT_TF_RC_SUCCESS;
752 }
753
754 /* Function to handle the parsing of RTE Flow item IPV6 Header */
755 int32_t
756 ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
757                          struct ulp_rte_parser_params *params)
758 {
759         const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
760         const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
761         struct ulp_rte_hdr_field *field;
762         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
763         uint32_t idx = params->field_idx;
764         uint32_t size;
765         uint32_t inner_l3, outer_l3;
766         uint32_t vtcf, vtcf_mask;
767
768         inner_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L3);
769         if (inner_l3) {
770                 BNXT_TF_DBG(ERR, "Parse Error: Third L3 header not supported\n");
771                 return BNXT_TF_RC_ERROR;
772         }
773
774         /*
775          * Copy the rte_flow_item for ipv6 into hdr_field using ipv6
776          * header fields
777          */
778         if (ipv6_spec) {
779                 size = sizeof(ipv6_spec->hdr.vtc_flow);
780
781                 vtcf = BNXT_ULP_GET_IPV6_VER(ipv6_spec->hdr.vtc_flow);
782                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
783                                                 &vtcf,
784                                                 size);
785
786                 vtcf = BNXT_ULP_GET_IPV6_TC(ipv6_spec->hdr.vtc_flow);
787                 field = ulp_rte_parser_fld_copy(field,
788                                                 &vtcf,
789                                                 size);
790
791                 vtcf = BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_spec->hdr.vtc_flow);
792                 field = ulp_rte_parser_fld_copy(field,
793                                                 &vtcf,
794                                                 size);
795
796                 size = sizeof(ipv6_spec->hdr.payload_len);
797                 field = ulp_rte_parser_fld_copy(field,
798                                                 &ipv6_spec->hdr.payload_len,
799                                                 size);
800                 size = sizeof(ipv6_spec->hdr.proto);
801                 field = ulp_rte_parser_fld_copy(field,
802                                                 &ipv6_spec->hdr.proto,
803                                                 size);
804                 size = sizeof(ipv6_spec->hdr.hop_limits);
805                 field = ulp_rte_parser_fld_copy(field,
806                                                 &ipv6_spec->hdr.hop_limits,
807                                                 size);
808                 size = sizeof(ipv6_spec->hdr.src_addr);
809                 field = ulp_rte_parser_fld_copy(field,
810                                                 &ipv6_spec->hdr.src_addr,
811                                                 size);
812                 size = sizeof(ipv6_spec->hdr.dst_addr);
813                 field = ulp_rte_parser_fld_copy(field,
814                                                 &ipv6_spec->hdr.dst_addr,
815                                                 size);
816         }
817         if (ipv6_mask) {
818                 size = sizeof(ipv6_mask->hdr.vtc_flow);
819
820                 vtcf_mask = BNXT_ULP_GET_IPV6_VER(ipv6_mask->hdr.vtc_flow);
821                 ulp_rte_prsr_mask_copy(params, &idx,
822                                        &vtcf_mask,
823                                        size);
824
825                 vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
826                 ulp_rte_prsr_mask_copy(params, &idx,
827                                        &vtcf_mask,
828                                        size);
829
830                 vtcf_mask =
831                         BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
832                 ulp_rte_prsr_mask_copy(params, &idx,
833                                        &vtcf_mask,
834                                        size);
835
836                 ulp_rte_prsr_mask_copy(params, &idx,
837                                        &ipv6_mask->hdr.payload_len,
838                                        sizeof(ipv6_mask->hdr.payload_len));
839                 ulp_rte_prsr_mask_copy(params, &idx,
840                                        &ipv6_mask->hdr.proto,
841                                        sizeof(ipv6_mask->hdr.proto));
842                 ulp_rte_prsr_mask_copy(params, &idx,
843                                        &ipv6_mask->hdr.hop_limits,
844                                        sizeof(ipv6_mask->hdr.hop_limits));
845                 ulp_rte_prsr_mask_copy(params, &idx,
846                                        &ipv6_mask->hdr.src_addr,
847                                        sizeof(ipv6_mask->hdr.src_addr));
848                 ulp_rte_prsr_mask_copy(params, &idx,
849                                        &ipv6_mask->hdr.dst_addr,
850                                        sizeof(ipv6_mask->hdr.dst_addr));
851         }
852         /* add number of ipv6 header elements */
853         params->field_idx += BNXT_ULP_PROTO_HDR_IPV6_NUM;
854
855         /* Set the ipv6 header bitmap and computed l3 header bitmaps */
856         outer_l3 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L3);
857         if (outer_l3 ||
858             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4) ||
859             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6)) {
860                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_IPV6);
861                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3, 1);
862         } else {
863                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
864                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
865         }
866         return BNXT_TF_RC_SUCCESS;
867 }
868
869 /* Function to handle the parsing of RTE Flow item UDP Header. */
870 int32_t
871 ulp_rte_udp_hdr_handler(const struct rte_flow_item *item,
872                         struct ulp_rte_parser_params *params)
873 {
874         const struct rte_flow_item_udp *udp_spec = item->spec;
875         const struct rte_flow_item_udp *udp_mask = item->mask;
876         struct ulp_rte_hdr_field *field;
877         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
878         uint32_t idx = params->field_idx;
879         uint32_t size;
880         uint32_t inner_l4, outer_l4;
881
882         inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
883         if (inner_l4) {
884                 BNXT_TF_DBG(ERR, "Parse Err:Third L4 header not supported\n");
885                 return BNXT_TF_RC_ERROR;
886         }
887
888         /*
889          * Copy the rte_flow_item for udp into hdr_field using udp
890          * header fields
891          */
892         if (udp_spec) {
893                 size = sizeof(udp_spec->hdr.src_port);
894                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
895                                                 &udp_spec->hdr.src_port,
896                                                 size);
897                 size = sizeof(udp_spec->hdr.dst_port);
898                 field = ulp_rte_parser_fld_copy(field,
899                                                 &udp_spec->hdr.dst_port,
900                                                 size);
901                 size = sizeof(udp_spec->hdr.dgram_len);
902                 field = ulp_rte_parser_fld_copy(field,
903                                                 &udp_spec->hdr.dgram_len,
904                                                 size);
905                 size = sizeof(udp_spec->hdr.dgram_cksum);
906                 field = ulp_rte_parser_fld_copy(field,
907                                                 &udp_spec->hdr.dgram_cksum,
908                                                 size);
909         }
910         if (udp_mask) {
911                 ulp_rte_prsr_mask_copy(params, &idx,
912                                        &udp_mask->hdr.src_port,
913                                        sizeof(udp_mask->hdr.src_port));
914                 ulp_rte_prsr_mask_copy(params, &idx,
915                                        &udp_mask->hdr.dst_port,
916                                        sizeof(udp_mask->hdr.dst_port));
917                 ulp_rte_prsr_mask_copy(params, &idx,
918                                        &udp_mask->hdr.dgram_len,
919                                        sizeof(udp_mask->hdr.dgram_len));
920                 ulp_rte_prsr_mask_copy(params, &idx,
921                                        &udp_mask->hdr.dgram_cksum,
922                                        sizeof(udp_mask->hdr.dgram_cksum));
923         }
924
925         /* Add number of UDP header elements */
926         params->field_idx += BNXT_ULP_PROTO_HDR_UDP_NUM;
927
928         /* Set the udp header bitmap and computed l4 header bitmaps */
929         outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
930         if (outer_l4 ||
931             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
932             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
933                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
934                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
935         } else {
936                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
937                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
938         }
939         return BNXT_TF_RC_SUCCESS;
940 }
941
942 /* Function to handle the parsing of RTE Flow item TCP Header. */
943 int32_t
944 ulp_rte_tcp_hdr_handler(const struct rte_flow_item *item,
945                         struct ulp_rte_parser_params *params)
946 {
947         const struct rte_flow_item_tcp *tcp_spec = item->spec;
948         const struct rte_flow_item_tcp *tcp_mask = item->mask;
949         struct ulp_rte_hdr_field *field;
950         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
951         uint32_t idx = params->field_idx;
952         uint32_t size;
953         uint32_t inner_l4, outer_l4;
954
955         inner_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_I_L4);
956         if (inner_l4) {
957                 BNXT_TF_DBG(ERR, "Parse Error:Third L4 header not supported\n");
958                 return BNXT_TF_RC_ERROR;
959         }
960
961         /*
962          * Copy the rte_flow_item for tcp into hdr_field using tcp
963          * header fields
964          */
965         if (tcp_spec) {
966                 size = sizeof(tcp_spec->hdr.src_port);
967                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
968                                                 &tcp_spec->hdr.src_port,
969                                                 size);
970                 size = sizeof(tcp_spec->hdr.dst_port);
971                 field = ulp_rte_parser_fld_copy(field,
972                                                 &tcp_spec->hdr.dst_port,
973                                                 size);
974                 size = sizeof(tcp_spec->hdr.sent_seq);
975                 field = ulp_rte_parser_fld_copy(field,
976                                                 &tcp_spec->hdr.sent_seq,
977                                                 size);
978                 size = sizeof(tcp_spec->hdr.recv_ack);
979                 field = ulp_rte_parser_fld_copy(field,
980                                                 &tcp_spec->hdr.recv_ack,
981                                                 size);
982                 size = sizeof(tcp_spec->hdr.data_off);
983                 field = ulp_rte_parser_fld_copy(field,
984                                                 &tcp_spec->hdr.data_off,
985                                                 size);
986                 size = sizeof(tcp_spec->hdr.tcp_flags);
987                 field = ulp_rte_parser_fld_copy(field,
988                                                 &tcp_spec->hdr.tcp_flags,
989                                                 size);
990                 size = sizeof(tcp_spec->hdr.rx_win);
991                 field = ulp_rte_parser_fld_copy(field,
992                                                 &tcp_spec->hdr.rx_win,
993                                                 size);
994                 size = sizeof(tcp_spec->hdr.cksum);
995                 field = ulp_rte_parser_fld_copy(field,
996                                                 &tcp_spec->hdr.cksum,
997                                                 size);
998                 size = sizeof(tcp_spec->hdr.tcp_urp);
999                 field = ulp_rte_parser_fld_copy(field,
1000                                                 &tcp_spec->hdr.tcp_urp,
1001                                                 size);
1002         } else {
1003                 idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1004         }
1005
1006         if (tcp_mask) {
1007                 ulp_rte_prsr_mask_copy(params, &idx,
1008                                        &tcp_mask->hdr.src_port,
1009                                        sizeof(tcp_mask->hdr.src_port));
1010                 ulp_rte_prsr_mask_copy(params, &idx,
1011                                        &tcp_mask->hdr.dst_port,
1012                                        sizeof(tcp_mask->hdr.dst_port));
1013                 ulp_rte_prsr_mask_copy(params, &idx,
1014                                        &tcp_mask->hdr.sent_seq,
1015                                        sizeof(tcp_mask->hdr.sent_seq));
1016                 ulp_rte_prsr_mask_copy(params, &idx,
1017                                        &tcp_mask->hdr.recv_ack,
1018                                        sizeof(tcp_mask->hdr.recv_ack));
1019                 ulp_rte_prsr_mask_copy(params, &idx,
1020                                        &tcp_mask->hdr.data_off,
1021                                        sizeof(tcp_mask->hdr.data_off));
1022                 ulp_rte_prsr_mask_copy(params, &idx,
1023                                        &tcp_mask->hdr.tcp_flags,
1024                                        sizeof(tcp_mask->hdr.tcp_flags));
1025                 ulp_rte_prsr_mask_copy(params, &idx,
1026                                        &tcp_mask->hdr.rx_win,
1027                                        sizeof(tcp_mask->hdr.rx_win));
1028                 ulp_rte_prsr_mask_copy(params, &idx,
1029                                        &tcp_mask->hdr.cksum,
1030                                        sizeof(tcp_mask->hdr.cksum));
1031                 ulp_rte_prsr_mask_copy(params, &idx,
1032                                        &tcp_mask->hdr.tcp_urp,
1033                                        sizeof(tcp_mask->hdr.tcp_urp));
1034         }
1035         /* add number of TCP header elements */
1036         params->field_idx += BNXT_ULP_PROTO_HDR_TCP_NUM;
1037
1038         /* Set the tcp header bitmap and computed l4 header bitmaps */
1039         outer_l4 = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_O_L4);
1040         if (outer_l4 ||
1041             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
1042             ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
1043                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
1044                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
1045         } else {
1046                 ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
1047                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
1048         }
1049         return BNXT_TF_RC_SUCCESS;
1050 }
1051
1052 /* Function to handle the parsing of RTE Flow item Vxlan Header. */
1053 int32_t
1054 ulp_rte_vxlan_hdr_handler(const struct rte_flow_item *item,
1055                           struct ulp_rte_parser_params *params)
1056 {
1057         const struct rte_flow_item_vxlan *vxlan_spec = item->spec;
1058         const struct rte_flow_item_vxlan *vxlan_mask = item->mask;
1059         struct ulp_rte_hdr_field *field;
1060         struct ulp_rte_hdr_bitmap *hdr_bitmap = &params->hdr_bitmap;
1061         uint32_t idx = params->field_idx;
1062         uint32_t size;
1063
1064         /*
1065          * Copy the rte_flow_item for vxlan into hdr_field using vxlan
1066          * header fields
1067          */
1068         if (vxlan_spec) {
1069                 size = sizeof(vxlan_spec->flags);
1070                 field = ulp_rte_parser_fld_copy(&params->hdr_field[idx],
1071                                                 &vxlan_spec->flags,
1072                                                 size);
1073                 size = sizeof(vxlan_spec->rsvd0);
1074                 field = ulp_rte_parser_fld_copy(field,
1075                                                 &vxlan_spec->rsvd0,
1076                                                 size);
1077                 size = sizeof(vxlan_spec->vni);
1078                 field = ulp_rte_parser_fld_copy(field,
1079                                                 &vxlan_spec->vni,
1080                                                 size);
1081                 size = sizeof(vxlan_spec->rsvd1);
1082                 field = ulp_rte_parser_fld_copy(field,
1083                                                 &vxlan_spec->rsvd1,
1084                                                 size);
1085         }
1086         if (vxlan_mask) {
1087                 ulp_rte_prsr_mask_copy(params, &idx,
1088                                        &vxlan_mask->flags,
1089                                        sizeof(vxlan_mask->flags));
1090                 ulp_rte_prsr_mask_copy(params, &idx,
1091                                        &vxlan_mask->rsvd0,
1092                                        sizeof(vxlan_mask->rsvd0));
1093                 ulp_rte_prsr_mask_copy(params, &idx,
1094                                        &vxlan_mask->vni,
1095                                        sizeof(vxlan_mask->vni));
1096                 ulp_rte_prsr_mask_copy(params, &idx,
1097                                        &vxlan_mask->rsvd1,
1098                                        sizeof(vxlan_mask->rsvd1));
1099         }
1100         /* Add number of vxlan header elements */
1101         params->field_idx += BNXT_ULP_PROTO_HDR_VXLAN_NUM;
1102
1103         /* Update the hdr_bitmap with vxlan */
1104         ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
1105         return BNXT_TF_RC_SUCCESS;
1106 }
1107
1108 /* Function to handle the parsing of RTE Flow item void Header */
1109 int32_t
1110 ulp_rte_void_hdr_handler(const struct rte_flow_item *item __rte_unused,
1111                          struct ulp_rte_parser_params *params __rte_unused)
1112 {
1113         return BNXT_TF_RC_SUCCESS;
1114 }
1115
1116 /* Function to handle the parsing of RTE Flow action void Header. */
1117 int32_t
1118 ulp_rte_void_act_handler(const struct rte_flow_action *action_item __rte_unused,
1119                          struct ulp_rte_parser_params *params __rte_unused)
1120 {
1121         return BNXT_TF_RC_SUCCESS;
1122 }
1123
1124 /* Function to handle the parsing of RTE Flow action Mark Header. */
1125 int32_t
1126 ulp_rte_mark_act_handler(const struct rte_flow_action *action_item,
1127                          struct ulp_rte_parser_params *param)
1128 {
1129         const struct rte_flow_action_mark *mark;
1130         struct ulp_rte_act_bitmap *act = &param->act_bitmap;
1131         uint32_t mark_id;
1132
1133         mark = action_item->conf;
1134         if (mark) {
1135                 mark_id = tfp_cpu_to_be_32(mark->id);
1136                 memcpy(&param->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_MARK],
1137                        &mark_id, BNXT_ULP_ACT_PROP_SZ_MARK);
1138
1139                 /* Update the act_bitmap with the mark action */
1140                 ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_MARK);
1141                 return BNXT_TF_RC_SUCCESS;
1142         }
1143         BNXT_TF_DBG(ERR, "Parse Error: Mark arg is invalid\n");
1144         return BNXT_TF_RC_ERROR;
1145 }
1146
1147 /* Function to handle the parsing of RTE Flow action RSS Header. */
1148 int32_t
1149 ulp_rte_rss_act_handler(const struct rte_flow_action *action_item,
1150                         struct ulp_rte_parser_params *param)
1151 {
1152         const struct rte_flow_action_rss *rss = action_item->conf;
1153
1154         if (rss) {
1155                 /* Update the act_bitmap with the rss action */
1156                 ULP_BITMAP_SET(param->act_bitmap.bits, BNXT_ULP_ACTION_BIT_RSS);
1157                 return BNXT_TF_RC_SUCCESS;
1158         }
1159         BNXT_TF_DBG(ERR, "Parse Error: RSS arg is invalid\n");
1160         return BNXT_TF_RC_ERROR;
1161 }
1162
1163 /* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
1164 int32_t
1165 ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
1166                                 struct ulp_rte_parser_params *params)
1167 {
1168         const struct rte_flow_action_vxlan_encap *vxlan_encap;
1169         const struct rte_flow_item *item;
1170         const struct rte_flow_item_eth *eth_spec;
1171         const struct rte_flow_item_ipv4 *ipv4_spec;
1172         const struct rte_flow_item_ipv6 *ipv6_spec;
1173         struct rte_flow_item_vxlan vxlan_spec;
1174         uint32_t vlan_num = 0, vlan_size = 0;
1175         uint32_t ip_size = 0, ip_type = 0;
1176         uint32_t vxlan_size = 0;
1177         uint8_t *buff;
1178         /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
1179         const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
1180                                     0x00, 0x40, 0x11};
1181         struct ulp_rte_act_bitmap *act = &params->act_bitmap;
1182         struct ulp_rte_act_prop *ap = &params->act_prop;
1183
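             /*
              * The encap definition list is consumed below in a fixed order
              * (VOID items are skipped):
              *   ETH / [VLAN [/ VLAN]] / IPV4 | IPV6 / UDP / VXLAN / END
              * An application would typically build it roughly as follows
              * (illustrative sketch only; the *_s spec variables are
              * hypothetical):
              *
              *   struct rte_flow_item defn[] = {
              *           { .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth_s   },
              *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ipv4_s  },
              *           { .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp_s   },
              *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_s },
              *           { .type = RTE_FLOW_ITEM_TYPE_END },
              *   };
              *   struct rte_flow_action_vxlan_encap vxlan_conf = {
              *           .definition = defn,
              *   };
              */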
1184         vxlan_encap = action_item->conf;
1185         if (!vxlan_encap) {
1186                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan_encap arg is invalid\n");
1187                 return BNXT_TF_RC_ERROR;
1188         }
1189
1190         item = vxlan_encap->definition;
1191         if (!item) {
1192                 BNXT_TF_DBG(ERR, "Parse Error: definition arg is invalid\n");
1193                 return BNXT_TF_RC_ERROR;
1194         }
1195
1196         if (!ulp_rte_item_skip_void(&item, 0))
1197                 return BNXT_TF_RC_ERROR;
1198
1199         /* must have ethernet header */
1200         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
1201                 BNXT_TF_DBG(ERR, "Parse Error: vxlan encap does not have eth\n");
1202                 return BNXT_TF_RC_ERROR;
1203         }
1204         eth_spec = item->spec;
1205         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
1206         ulp_encap_buffer_copy(buff,
1207                               eth_spec->dst.addr_bytes,
1208                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
1209
1210         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
1211         ulp_encap_buffer_copy(buff,
1212                               eth_spec->src.addr_bytes,
1213                               BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
1214
1215         /* Goto the next item */
1216         if (!ulp_rte_item_skip_void(&item, 1))
1217                 return BNXT_TF_RC_ERROR;
1218
1219         /* May have vlan header */
1220         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1221                 vlan_num++;
1222                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
1223                 ulp_encap_buffer_copy(buff,
1224                                       item->spec,
1225                                       sizeof(struct rte_flow_item_vlan));
1226
1227                 if (!ulp_rte_item_skip_void(&item, 1))
1228                         return BNXT_TF_RC_ERROR;
1229         }
1230
1231         /* May have a second vlan header */
1232         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1233                 vlan_num++;
1234                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
1235                        sizeof(struct rte_flow_item_vlan)],
1236                        item->spec,
1237                        sizeof(struct rte_flow_item_vlan));
1238                 if (!ulp_rte_item_skip_void(&item, 1))
1239                         return BNXT_TF_RC_ERROR;
1240         }
1241         /* Update the vlan count and total size if any vlan tags were present */
1242         if (vlan_num) {
1243                 vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
1244                 vlan_num = tfp_cpu_to_be_32(vlan_num);
1245                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
1246                        &vlan_num,
1247                        sizeof(uint32_t));
1248                 vlan_size = tfp_cpu_to_be_32(vlan_size);
1249                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_SZ],
1250                        &vlan_size,
1251                        sizeof(uint32_t));
1252         }
1253
1254         /* L3 must be IPv4 or IPv6 */
1255         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1256                 ipv4_spec = item->spec;
1257                 ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
1258
1259                 /* copy the ipv4 details */
1260                 if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
1261                                         BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
1262                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1263                         ulp_encap_buffer_copy(buff,
1264                                               def_ipv4_hdr,
1265                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1266                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1267                 } else {
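                             /*
                              * The spec carries a non-zero version/IHL/TOS, so
                              * build the encap IP header from the user-supplied
                              * fields rather than the default template above.
                              */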
1268                         const uint8_t *tmp_buff;
1269
1270                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
1271                         tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
1272                         ulp_encap_buffer_copy(buff,
1273                                               tmp_buff,
1274                                               BNXT_ULP_ENCAP_IPV4_ID_PROTO);
1275                         buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1276                              BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1277                         ulp_encap_buffer_copy(buff,
1278                                               &ipv4_spec->hdr.version_ihl,
1279                                               BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
1280                 }
1281                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
1282                     BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
1283                     BNXT_ULP_ENCAP_IPV4_ID_PROTO];
1284                 ulp_encap_buffer_copy(buff,
1285                                       (const uint8_t *)&ipv4_spec->hdr.dst_addr,
1286                                       BNXT_ULP_ENCAP_IPV4_DEST_IP);
1287
1288                 buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
1289                 ulp_encap_buffer_copy(buff,
1290                                       (const uint8_t *)&ipv4_spec->hdr.src_addr,
1291                                       BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
1292
1293                 /* Update the ip size details */
1294                 ip_size = tfp_cpu_to_be_32(ip_size);
1295                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1296                        &ip_size, sizeof(uint32_t));
1297
1298                 /* update the ip type */
1299                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4);
1300                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1301                        &ip_type, sizeof(uint32_t));
1302
1303                 /* update the computed field to indicate an ipv4 header */
1304                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1305                                     1);
1306
1307                 if (!ulp_rte_item_skip_void(&item, 1))
1308                         return BNXT_TF_RC_ERROR;
1309         } else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1310                 ipv6_spec = item->spec;
1311                 ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
1312
1313                 /* copy the ipv6 details */
1314                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
1315                        ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
1316
1317                 /* Update the ip size details */
1318                 ip_size = tfp_cpu_to_be_32(ip_size);
1319                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
1320                        &ip_size, sizeof(uint32_t));
1321
1322                 /* update the ip type */
1323                 ip_type = rte_cpu_to_be_32(BNXT_ULP_ETH_IPV6);
1324                 memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L3_TYPE],
1325                        &ip_type, sizeof(uint32_t));
1326
1327                 /* update the computed field to indicate an ipv6 header */
1328                 ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1329                                     1);
1330
1331                 if (!ulp_rte_item_skip_void(&item, 1))
1332                         return BNXT_TF_RC_ERROR;
1333         } else {
1334                 BNXT_TF_DBG(ERR, "Parse Error: Vxlan Encap expects L3 hdr\n");
1335                 return BNXT_TF_RC_ERROR;
1336         }
1337
1338         /* L4 is UDP */
1339         if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
1340                 BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
1341                 return BNXT_TF_RC_ERROR;
1342         }
1343         /* copy the udp details */
1344         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
1345                               item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
1346
1347         if (!ulp_rte_item_skip_void(&item, 1))
1348                 return BNXT_TF_RC_ERROR;
1349
1350         /* Finally VXLAN */
1351         if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1352                 BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
1353                 return BNXT_TF_RC_ERROR;
1354         }
1355         vxlan_size = sizeof(struct rte_flow_item_vxlan);
1356         /* copy the vxlan details */
1357         memcpy(&vxlan_spec, item->spec, vxlan_size);
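             /* Force the VXLAN 'I' flag (0x08) so the VNI field is marked valid (RFC 7348) */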
1358         vxlan_spec.flags = 0x08;
1359         ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
1360                               (const uint8_t *)&vxlan_spec,
1361                               vxlan_size);
1362         vxlan_size = tfp_cpu_to_be_32(vxlan_size);
1363         memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
1364                &vxlan_size, sizeof(uint32_t));
1365
1366         /* Update the act_bitmap with vxlan encap */
1367         ULP_BITMAP_SET(act->bits, BNXT_ULP_ACTION_BIT_VXLAN_ENCAP);
1368         return BNXT_TF_RC_SUCCESS;
1369 }
1370
1371 /* Function to handle the parsing of RTE Flow action vxlan_decap Header. */
1372 int32_t
1373 ulp_rte_vxlan_decap_act_handler(const struct rte_flow_action *action_item
1374                                 __rte_unused,
1375                                 struct ulp_rte_parser_params *params)
1376 {
1377         /* Update the act_bitmap with vxlan decap */
1378         ULP_BITMAP_SET(params->act_bitmap.bits,
1379                        BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
1380         return BNXT_TF_RC_SUCCESS;
1381 }
1382
1383 /* Function to handle the parsing of RTE Flow action drop Header. */
1384 int32_t
1385 ulp_rte_drop_act_handler(const struct rte_flow_action *action_item __rte_unused,
1386                          struct ulp_rte_parser_params *params)
1387 {
1388         /* Update the act_bitmap with drop */
1389         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DROP);
1390         return BNXT_TF_RC_SUCCESS;
1391 }
1392
1393 /* Function to handle the parsing of RTE Flow action count. */
1394 int32_t
1395 ulp_rte_count_act_handler(const struct rte_flow_action *action_item,
1396                           struct ulp_rte_parser_params *params)
1397
1398 {
1399         const struct rte_flow_action_count *act_count;
1400         struct ulp_rte_act_prop *act_prop = &params->act_prop;
1401
1402         act_count = action_item->conf;
1403         if (act_count) {
1404                 if (act_count->shared) {
1405                         BNXT_TF_DBG(ERR,
1406                                     "Parse Error: Shared count not supported\n");
1407                         return BNXT_TF_RC_PARSE_ERR;
1408                 }
1409                 memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
1410                        &act_count->id,
1411                        BNXT_ULP_ACT_PROP_SZ_COUNT);
1412         }
1413
1414         /* Update the act_bitmap with count */
1415         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_COUNT);
1416         return BNXT_TF_RC_SUCCESS;
1417 }
1418
1419 /* Function to handle the parsing of action ports. */
1420 static int32_t
1421 ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
1422                             uint32_t ifindex)
1423 {
1424         enum bnxt_ulp_direction_type dir;
1425         uint16_t pid_s;
1426         uint32_t pid;
1427         struct ulp_rte_act_prop *act = &param->act_prop;
1428
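             /*
              * Egress flows are redirected through the destination's physical
              * vport; ingress flows through its default VNIC.  Both values are
              * written big-endian into the action property table.
              */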
1429         /* Get the direction */
1430         dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
1431         if (dir == BNXT_ULP_DIR_EGRESS) {
1432                 /* For egress direction, fill vport */
1433                 if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
1434                         return BNXT_TF_RC_ERROR;
1435
1436                 pid = pid_s;
1437                 pid = rte_cpu_to_be_32(pid);
1438                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1439                        &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1440         } else {
1441                 /* For ingress direction, fill vnic */
1442                 if (ulp_port_db_default_vnic_get(param->ulp_ctx, ifindex,
1443                                                  BNXT_ULP_DRV_FUNC_VNIC,
1444                                                  &pid_s))
1445                         return BNXT_TF_RC_ERROR;
1446
1447                 pid = pid_s;
1448                 pid = rte_cpu_to_be_32(pid);
1449                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_VNIC],
1450                        &pid, BNXT_ULP_ACT_PROP_SZ_VNIC);
1451         }
1452
1453         /* Update the action port set bit */
1454         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1455         return BNXT_TF_RC_SUCCESS;
1456 }
1457
1458 /* Function to handle the parsing of RTE Flow action PF. */
1459 int32_t
1460 ulp_rte_pf_act_handler(const struct rte_flow_action *action_item __rte_unused,
1461                        struct ulp_rte_parser_params *params)
1462 {
1463         uint32_t port_id;
1464         uint32_t ifindex;
1465         enum bnxt_ulp_intf_type intf_type;
1466
1467         /* Get the port id of the current device */
1468         port_id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
1469
1470         /* Get the port db ifindex */
1471         if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx, port_id,
1472                                               &ifindex)) {
1473                 BNXT_TF_DBG(ERR, "Invalid port id\n");
1474                 return BNXT_TF_RC_ERROR;
1475         }
1476
1477         /* Check that the port is a PF port */
1478         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1479         if (intf_type != BNXT_ULP_INTF_TYPE_PF) {
1480                 BNXT_TF_DBG(ERR, "Port is not a PF port\n");
1481                 return BNXT_TF_RC_ERROR;
1482         }
1483         /* Update the action properties */
1484         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1485         return ulp_rte_parser_act_port_set(params, ifindex);
1486 }
1487
1488 /* Function to handle the parsing of RTE Flow action VF. */
1489 int32_t
1490 ulp_rte_vf_act_handler(const struct rte_flow_action *action_item,
1491                        struct ulp_rte_parser_params *params)
1492 {
1493         const struct rte_flow_action_vf *vf_action;
1494         uint32_t ifindex;
1495         enum bnxt_ulp_intf_type intf_type;
1496
1497         vf_action = action_item->conf;
1498         if (!vf_action) {
1499                 BNXT_TF_DBG(ERR, "ParseErr: Invalid Argument\n");
1500                 return BNXT_TF_RC_PARSE_ERR;
1501         }
1502
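             /*
              * The 'original' flag asks to redirect to the original VF of the
              * traffic rather than the VF given in 'id' (see
              * rte_flow_action_vf); this parser does not support that mode.
              */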
1503         if (vf_action->original) {
1504                 BNXT_TF_DBG(ERR, "ParseErr: VF Original not supported\n");
1505                 return BNXT_TF_RC_PARSE_ERR;
1506         }
1507
1508         /* Check that the port is a VF port */
1509         if (ulp_port_db_dev_func_id_to_ulp_index(params->ulp_ctx, vf_action->id,
1510                                                  &ifindex)) {
1511                 BNXT_TF_DBG(ERR, "VF is not a valid interface\n");
1512                 return BNXT_TF_RC_ERROR;
1513         }
1514         intf_type = ulp_port_db_port_type_get(params->ulp_ctx, ifindex);
1515         if (intf_type != BNXT_ULP_INTF_TYPE_VF &&
1516             intf_type != BNXT_ULP_INTF_TYPE_TRUSTED_VF) {
1517                 BNXT_TF_DBG(ERR, "Port is not a VF port\n");
1518                 return BNXT_TF_RC_ERROR;
1519         }
1520
1521         /* Update the action properties */
1522         ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1523         return ulp_rte_parser_act_port_set(params, ifindex);
1524 }
1525
1526 /* Function to handle the parsing of RTE Flow action port_id. */
1527 int32_t
1528 ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
1529                             struct ulp_rte_parser_params *param)
1530 {
1531         const struct rte_flow_action_port_id *port_id = act_item->conf;
1532         uint32_t ifindex;
1533         enum bnxt_ulp_intf_type intf_type;
1534
1535         if (!port_id) {
1536                 BNXT_TF_DBG(ERR,
1537                             "ParseErr: Invalid Argument\n");
1538                 return BNXT_TF_RC_PARSE_ERR;
1539         }
1540         if (port_id->original) {
1541                 BNXT_TF_DBG(ERR,
1542                             "ParseErr: Portid Original not supported\n");
1543                 return BNXT_TF_RC_PARSE_ERR;
1544         }
1545
1546         /* Get the port db ifindex */
1547         if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
1548                                               &ifindex)) {
1549                 BNXT_TF_DBG(ERR, "Invalid port id\n");
1550                 return BNXT_TF_RC_ERROR;
1551         }
1552
1553         /* Get the intf type */
1554         intf_type = ulp_port_db_port_type_get(param->ulp_ctx, ifindex);
1555         if (!intf_type) {
1556                 BNXT_TF_DBG(ERR, "Invalid port type\n");
1557                 return BNXT_TF_RC_ERROR;
1558         }
1559
1560         /* Set the action port */
1561         ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
1562         return ulp_rte_parser_act_port_set(param, ifindex);
1563 }
1564
1565 /* Function to handle the parsing of RTE Flow action phy_port. */
1566 int32_t
1567 ulp_rte_phy_port_act_handler(const struct rte_flow_action *action_item,
1568                              struct ulp_rte_parser_params *prm)
1569 {
1570         const struct rte_flow_action_phy_port *phy_port;
1571         uint32_t pid;
1572         int32_t rc;
1573         uint16_t pid_s;
1574         enum bnxt_ulp_direction_type dir;
1575
1576         phy_port = action_item->conf;
1577         if (!phy_port) {
1578                 BNXT_TF_DBG(ERR,
1579                             "ParseErr: Invalid Argument\n");
1580                 return BNXT_TF_RC_PARSE_ERR;
1581         }
1582
1583         if (phy_port->original) {
1584                 BNXT_TF_DBG(ERR,
1585                             "Parse Err: Port Original not supported\n");
1586                 return BNXT_TF_RC_PARSE_ERR;
1587         }
1588         dir = ULP_COMP_FLD_IDX_RD(prm, BNXT_ULP_CF_IDX_DIRECTION);
1589         if (dir != BNXT_ULP_DIR_EGRESS) {
1590                 BNXT_TF_DBG(ERR,
1591                             "Parse Err: Phy ports are valid only for egress\n");
1592                 return BNXT_TF_RC_PARSE_ERR;
1593         }
1594         /* Get the physical port details from port db */
1595         rc = ulp_port_db_phy_port_vport_get(prm->ulp_ctx, phy_port->index,
1596                                             &pid_s);
1597         if (rc) {
1598                 BNXT_TF_DBG(ERR, "Failed to get port details\n");
1599                 return -EINVAL;
1600         }
1601
1602         pid = pid_s;
1603         pid = rte_cpu_to_be_32(pid);
1604         memcpy(&prm->act_prop.act_details[BNXT_ULP_ACT_PROP_IDX_VPORT],
1605                &pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
1606
1607         /* Update the action port set bit */
1608         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 1);
1609         ULP_COMP_FLD_IDX_WR(prm, BNXT_ULP_CF_IDX_ACT_PORT_TYPE,
1610                             BNXT_ULP_INTF_TYPE_PHY_PORT);
1611         return BNXT_TF_RC_SUCCESS;
1612 }
1613
1614 /* Function to handle the parsing of RTE Flow action pop vlan. */
1615 int32_t
1616 ulp_rte_of_pop_vlan_act_handler(const struct rte_flow_action *a __rte_unused,
1617                                 struct ulp_rte_parser_params *params)
1618 {
1619         /* Update the act_bitmap with pop */
1620         ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_POP_VLAN);
1621         return BNXT_TF_RC_SUCCESS;
1622 }
1623
1624 /* Function to handle the parsing of RTE Flow action push vlan. */
1625 int32_t
1626 ulp_rte_of_push_vlan_act_handler(const struct rte_flow_action *action_item,
1627                                  struct ulp_rte_parser_params *params)
1628 {
1629         const struct rte_flow_action_of_push_vlan *push_vlan;
1630         uint16_t ethertype;
1631         struct ulp_rte_act_prop *act = &params->act_prop;
1632
1633         push_vlan = action_item->conf;
1634         if (push_vlan) {
1635                 ethertype = push_vlan->ethertype;
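                     /*
                      * ethertype arrives in network byte order; cpu-to-be and
                      * be-to-cpu are the same transformation, so the
                      * tfp_cpu_to_be_16() below yields the host-order value
                      * for the comparison against RTE_ETHER_TYPE_VLAN.
                      */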
1636                 if (tfp_cpu_to_be_16(ethertype) != RTE_ETHER_TYPE_VLAN) {
1637                         BNXT_TF_DBG(ERR,
1638                                     "Parse Err: Ethertype not supported\n");
1639                         return BNXT_TF_RC_PARSE_ERR;
1640                 }
1641                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_PUSH_VLAN],
1642                        &ethertype, BNXT_ULP_ACT_PROP_SZ_PUSH_VLAN);
1643                 /* Update the act_bitmap with push vlan */
1644                 ULP_BITMAP_SET(params->act_bitmap.bits,
1645                                BNXT_ULP_ACTION_BIT_PUSH_VLAN);
1646                 return BNXT_TF_RC_SUCCESS;
1647         }
1648         BNXT_TF_DBG(ERR, "Parse Error: Push vlan arg is invalid\n");
1649         return BNXT_TF_RC_ERROR;
1650 }
1651
1652 /* Function to handle the parsing of RTE Flow action set vlan id. */
1653 int32_t
1654 ulp_rte_of_set_vlan_vid_act_handler(const struct rte_flow_action *action_item,
1655                                     struct ulp_rte_parser_params *params)
1656 {
1657         const struct rte_flow_action_of_set_vlan_vid *vlan_vid;
1658         uint32_t vid;
1659         struct ulp_rte_act_prop *act = &params->act_prop;
1660
1661         vlan_vid = action_item->conf;
1662         if (vlan_vid && vlan_vid->vlan_vid) {
1663                 vid = vlan_vid->vlan_vid;
1664                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_VID],
1665                        &vid, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_VID);
1666                 /* Update the act_bitmap with set vlan vid */
1667                 ULP_BITMAP_SET(params->act_bitmap.bits,
1668                                BNXT_ULP_ACTION_BIT_SET_VLAN_VID);
1669                 return BNXT_TF_RC_SUCCESS;
1670         }
1671         BNXT_TF_DBG(ERR, "Parse Error: Vlan vid arg is invalid\n");
1672         return BNXT_TF_RC_ERROR;
1673 }
1674
1675 /* Function to handle the parsing of RTE Flow action set vlan pcp. */
1676 int32_t
1677 ulp_rte_of_set_vlan_pcp_act_handler(const struct rte_flow_action *action_item,
1678                                     struct ulp_rte_parser_params *params)
1679 {
1680         const struct rte_flow_action_of_set_vlan_pcp *vlan_pcp;
1681         uint8_t pcp;
1682         struct ulp_rte_act_prop *act = &params->act_prop;
1683
1684         vlan_pcp = action_item->conf;
1685         if (vlan_pcp) {
1686                 pcp = vlan_pcp->vlan_pcp;
1687                 memcpy(&act->act_details[BNXT_ULP_ACT_PROP_IDX_SET_VLAN_PCP],
1688                        &pcp, BNXT_ULP_ACT_PROP_SZ_SET_VLAN_PCP);
1689                 /* Update the act_bitmap with set vlan pcp */
1690                 ULP_BITMAP_SET(params->act_bitmap.bits,
1691                                BNXT_ULP_ACTION_BIT_SET_VLAN_PCP);
1692                 return BNXT_TF_RC_SUCCESS;
1693         }
1694         BNXT_TF_DBG(ERR, "Parse Error: Vlan pcp arg is invalid\n");
1695         return BNXT_TF_RC_ERROR;
1696 }