diff --git a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
index df38b83700..3fb29c0cb4 100644
--- a/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
+++ b/drivers/net/bnxt/tf_ulp/ulp_rte_parser.c
@@ -1,8 +1,9 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2020 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
+#include <rte_vxlan.h>
 #include "bnxt.h"
 #include "ulp_template_db_enum.h"
 #include "ulp_template_struct.h"
@@ -1012,6 +1013,13 @@ ulp_rte_ipv4_hdr_handler(const struct rte_flow_item *item,
 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
 	}
 
+	/* Some of the PMD applications may set the protocol field
+	 * in the IPv4 spec but don't set the mask. So, consider
+	 * the mask in the proto value calculation.
+	 */
+	if (ipv4_mask)
+		proto &= ipv4_mask->hdr.next_proto_id;
+
 	/* Update the field protocol hdr bitmap */
 	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
@@ -1150,6 +1158,13 @@ ulp_rte_ipv6_hdr_handler(const struct rte_flow_item *item,
 		ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
 	}
 
+	/* Some of the PMD applications may set the protocol field
+	 * in the IPv6 spec but don't set the mask. So, consider
+	 * the mask in proto value calculation.
+	 */
+	if (ipv6_mask)
+		proto &= ipv6_mask->hdr.proto;
+
 	/* Update the field protocol hdr bitmap */
 	ulp_rte_l3_proto_type_update(params, proto, inner_flag);
 	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
@@ -1534,7 +1549,7 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
 
 	buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
 	ulp_encap_buffer_copy(buff, item->spec,
-			      sizeof(struct rte_flow_item_vlan),
+			      sizeof(struct rte_vlan_hdr),
 			      ULP_BUFFER_ALIGN_8_BYTE);
 
 	if (!ulp_rte_item_skip_void(&item, 1))
@@ -1545,15 +1560,15 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
 	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
 		vlan_num++;
 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
-		       sizeof(struct rte_flow_item_vlan)],
+		       sizeof(struct rte_vlan_hdr)],
 		       item->spec,
-		       sizeof(struct rte_flow_item_vlan));
+		       sizeof(struct rte_vlan_hdr));
 		if (!ulp_rte_item_skip_void(&item, 1))
 			return BNXT_TF_RC_ERROR;
 	}
 	/* Update the vlan count and size of more than one */
 	if (vlan_num) {
-		vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
+		vlan_size = vlan_num * sizeof(struct rte_vlan_hdr);
 		vlan_num = tfp_cpu_to_be_32(vlan_num);
 		memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
 		       &vlan_num,
@@ -1712,7 +1727,7 @@ ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
 		BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
 		return BNXT_TF_RC_ERROR;
 	}
-	vxlan_size = sizeof(struct rte_flow_item_vxlan);
+	vxlan_size = sizeof(struct rte_vxlan_hdr);
 	/* copy the vxlan details */
 	memcpy(&vxlan_spec, item->spec, vxlan_size);
 	vxlan_spec.flags = 0x08;
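
Note (not part of the patch): the IPv4/IPv6 hunks above boil down to AND-ing the spec's protocol field with the item mask before deciding which L4 header follows. Below is a minimal standalone sketch of that pattern using the generic rte_flow item types; the helper name effective_ipv4_proto is hypothetical and does not exist in the driver.

#include <stdint.h>
#include <rte_flow.h>

/* Hypothetical helper illustrating the mask-aware protocol calculation the
 * patch adds: when the application supplies a mask, only the masked bits of
 * next_proto_id are meaningful, so an unmasked proto value must not be used
 * to select a specific L4 header type.
 */
static uint8_t
effective_ipv4_proto(const struct rte_flow_item *item)
{
	const struct rte_flow_item_ipv4 *spec = item->spec;
	const struct rte_flow_item_ipv4 *mask = item->mask;
	uint8_t proto;

	if (spec == NULL)
		return 0;

	proto = spec->hdr.next_proto_id;
	if (mask != NULL)
		proto &= mask->hdr.next_proto_id;	/* same AND as the patch */

	return proto;
}

In the driver itself, the masked value is then passed to ulp_rte_l3_proto_type_update(), so a spec that carries a protocol but an all-zero mask no longer sets a spurious L4 header bit.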