/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2020 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
* All rights reserved.
*/
+#include <rte_vxlan.h>
#include "bnxt.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
+#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
#include "ulp_rte_parser.h"
+#include "ulp_matcher.h"
#include "ulp_utils.h"
#include "tfp.h"
#include "ulp_port_db.h"
+#include "ulp_flow_db.h"
+#include "ulp_mapper.h"
+#include "ulp_tun.h"
/* Local defines for the parsing functions */
#define ULP_VLAN_PRIORITY_SHIFT 13 /* First 3 bits */
*idx = *idx + 1;
}
+/*
+ * No-op mask handler: advances the field index without recording the
+ * mask bytes.  Used for mask fields that are deliberately not matched
+ * on (e.g. the VLAN priority and IPv4 TOS work-arounds in this file).
+ */
+static void
+ulp_rte_prsr_mask_ignore(struct ulp_rte_parser_params *params __rte_unused,
+ uint32_t *idx,
+ const void *buffer __rte_unused,
+ uint32_t size __rte_unused)
+{
+ *idx = *idx + 1;
+}
+
+
/*
* Function to handle the parsing of RTE Flows and placing
* the RTE flow items into the ulp structures.
{
uint32_t ifindex;
uint16_t port_id, parif;
+ uint32_t mtype;
enum bnxt_ulp_direction_type dir;
/* get the direction details */
dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
+ /* read the port id details */
+ port_id = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_INCOMING_IF);
+ if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
+ port_id,
+ &ifindex)) {
+ BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
+ return;
+ }
+
if (dir == BNXT_ULP_DIR_INGRESS) {
- /* read the port id details */
- port_id = ULP_COMP_FLD_IDX_RD(params,
- BNXT_ULP_CF_IDX_INCOMING_IF);
- if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
- port_id,
- &ifindex)) {
- BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
- return;
- }
/* Set port PARIF */
if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
BNXT_ULP_PHY_PORT_PARIF, &parif)) {
BNXT_TF_DBG(ERR, "ParseErr:ifindex is not valid\n");
return;
}
- /* Parif needs to be reset to a free partition */
- parif += BNXT_ULP_FREE_PARIF_BASE;
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_PHY_PORT_PARIF,
parif);
+ } else {
+ /* Get the match port type */
+ mtype = ULP_COMP_FLD_IDX_RD(params,
+ BNXT_ULP_CF_IDX_MATCH_PORT_TYPE);
+ if (mtype == BNXT_ULP_INTF_TYPE_VF_REP) {
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_MATCH_PORT_IS_VFREP,
+ 1);
+ /* Set VF func PARIF */
+ if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_VF_FUNC_PARIF,
+ &parif)) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_VF_FUNC_PARIF,
+ parif);
+
+ /* populate the loopback parif */
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_LOOPBACK_PARIF,
+ BNXT_ULP_SYM_VF_FUNC_PARIF);
+
+ } else {
+ /* Set DRV func PARIF */
+ if (ulp_port_db_parif_get(params->ulp_ctx, ifindex,
+ BNXT_ULP_DRV_FUNC_PARIF,
+ &parif)) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:ifindex is not valid\n");
+ return;
+ }
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_DRV_FUNC_PARIF,
+ parif);
+ }
}
}
-/*
- * Function to handle the post processing of the parsing details
- */
-int32_t
-bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
+static int32_t
+ulp_post_process_normal_flow(struct ulp_rte_parser_params *params)
{
- enum bnxt_ulp_direction_type dir;
enum bnxt_ulp_intf_type match_port_type, act_port_type;
+ enum bnxt_ulp_direction_type dir;
uint32_t act_port_set;
/* Get the computed details */
return 0;
}
+/*
+ * Function to handle the post processing of the parsing details.
+ * Runs the normal-flow pass first, then the tunnel-flow pass.
+ */
+int32_t
+bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
+{
+	/* NOTE(review): the return value of the normal-flow pass is
+	 * discarded; only the tunnel-flow result is propagated to the
+	 * caller -- confirm this is intentional.
+	 */
+	ulp_post_process_normal_flow(params);
+	return ulp_post_process_tun_flow(params);
+}
+
+
/*
* Function to compute the flow direction based on the match port details
*/
}
}
+/*
+ * Internal function to identify broadcast or multicast packets.
+ * Returns 1 (and logs) when the given Ethernet address is multicast or
+ * broadcast -- such addresses cannot be offloaded -- and 0 otherwise.
+ */
+static int32_t
+ulp_rte_parser_is_bcmc_addr(const struct rte_ether_addr *eth_addr)
+{
+	if (rte_is_multicast_ether_addr(eth_addr) ||
+	    rte_is_broadcast_ether_addr(eth_addr)) {
+		BNXT_TF_DBG(DEBUG,
+			    "No support for bcast or mcast addr offload\n");
+		return 1;
+	}
+	return 0;
+}
+
+
/* Function to handle the parsing of RTE Flow item Ethernet Header. */
int32_t
ulp_rte_eth_hdr_handler(const struct rte_flow_item *item,
field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
eth_spec->dst.addr_bytes,
size);
+ /* Todo: work around to avoid multicast and broadcast addr */
+ if (ulp_rte_parser_is_bcmc_addr(ð_spec->dst))
+ return BNXT_TF_RC_PARSE_ERR;
+
size = sizeof(eth_spec->src.addr_bytes);
field = ulp_rte_parser_fld_copy(field,
eth_spec->src.addr_bytes,
size);
+ /* Todo: work around to avoid multicast and broadcast addr */
+ if (ulp_rte_parser_is_bcmc_addr(ð_spec->src))
+ return BNXT_TF_RC_PARSE_ERR;
+
field = ulp_rte_parser_fld_copy(field,
ð_spec->type,
sizeof(eth_spec->type));
params->field_idx += BNXT_ULP_PROTO_HDR_VLAN_NUM;
/* Update the protocol hdr bitmap */
- if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH)) {
+ if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_ETH) ||
+ ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_IPV4) ||
+ ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_IPV6) ||
+ ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_UDP) ||
+ ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_TCP)) {
ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_I_ETH);
inner_flag = 1;
} else {
vlan_tag |= ~ULP_VLAN_TAG_MASK;
vlan_tag = htons(vlan_tag);
- ulp_rte_prsr_mask_copy(params, &idx, &priority,
- sizeof(priority));
+ /*
+ * The priority field is ignored since OVS is setting it as
+ * wild card match and it is not supported. This is a work
+ * around and shall be addressed in the future.
+ */
+ ulp_rte_prsr_mask_ignore(params, &idx, &priority,
+ sizeof(priority));
+
ulp_rte_prsr_mask_copy(params, &idx, &vlan_tag,
sizeof(vlan_tag));
ulp_rte_prsr_mask_copy(params, &idx, &vlan_mask->inner_type,
return BNXT_TF_RC_ERROR;
}
+ if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_ETH) &&
+ !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_I_ETH)) {
+ /* Since F2 flow does not include eth item, when parser detects
+ * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
+ * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
+ * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
+ * This will allow the parser post processor to update the
+ * t_dmac in hdr_field[o_eth.dmac]
+ */
+ idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
+ BNXT_ULP_PROTO_HDR_VLAN_NUM);
+ params->field_idx = idx;
+ }
+
/*
* Copy the rte_flow_item for ipv4 into hdr_field using ipv4
* header fields
ulp_rte_prsr_mask_copy(params, &idx,
&ipv4_mask->hdr.version_ihl,
sizeof(ipv4_mask->hdr.version_ihl));
-#ifdef ULP_DONT_IGNORE_TOS
- ulp_rte_prsr_mask_copy(params, &idx,
- &ipv4_mask->hdr.type_of_service,
- sizeof(ipv4_mask->hdr.type_of_service));
-#else
/*
* The tos field is ignored since OVS is setting it as wild card
* match and it is not supported. This is a work around and
* shall be addressed in the future.
*/
- idx += 1;
-#endif
+ ulp_rte_prsr_mask_ignore(params, &idx,
+ &ipv4_mask->hdr.type_of_service,
+ sizeof(ipv4_mask->hdr.type_of_service)
+ );
ulp_rte_prsr_mask_copy(params, &idx,
&ipv4_mask->hdr.total_length,
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
}
+ /* Some of the PMD applications may set the protocol field
+ * in the IPv4 spec but don't set the mask. So, consider
+ * the mask in the proto value calculation.
+ */
+ if (ipv4_mask)
+ proto &= ipv4_mask->hdr.next_proto_id;
+
/* Update the field protocol hdr bitmap */
ulp_rte_l3_proto_type_update(params, proto, inner_flag);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
return BNXT_TF_RC_ERROR;
}
+ if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_O_ETH) &&
+ !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_I_ETH)) {
+ /* Since F2 flow does not include eth item, when parser detects
+ * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
+ * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
+ * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
+ * This will allow the parser post processor to update the
+ * t_dmac in hdr_field[o_eth.dmac]
+ */
+ idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
+ BNXT_ULP_PROTO_HDR_VLAN_NUM);
+ params->field_idx = idx;
+ }
+
/*
* Copy the rte_flow_item for ipv6 into hdr_field using ipv6
* header fields
ulp_rte_prsr_mask_copy(params, &idx,
&vtcf_mask,
size);
-
+ /*
+ * The TC and flow label field are ignored since OVS is
+ * setting it for match and it is not supported.
+ * This is a work around and
+ * shall be addressed in the future.
+ */
vtcf_mask = BNXT_ULP_GET_IPV6_TC(ipv6_mask->hdr.vtc_flow);
- ulp_rte_prsr_mask_copy(params, &idx,
- &vtcf_mask,
- size);
-
+ ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
vtcf_mask =
BNXT_ULP_GET_IPV6_FLOWLABEL(ipv6_mask->hdr.vtc_flow);
- ulp_rte_prsr_mask_copy(params, &idx,
- &vtcf_mask,
- size);
+ ulp_rte_prsr_mask_ignore(params, &idx, &vtcf_mask, size);
ulp_rte_prsr_mask_copy(params, &idx,
&ipv6_mask->hdr.payload_len,
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
}
+ /* Some of the PMD applications may set the protocol field
+ * in the IPv6 spec but don't set the mask. So, consider
+ * the mask in proto value calculation.
+ */
+ if (ipv6_mask)
+ proto &= ipv6_mask->hdr.proto;
+
/* Update the field protocol hdr bitmap */
ulp_rte_l3_proto_type_update(params, proto, inner_flag);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_HDR_CNT, ++cnt);
ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
uint16_t dst_port)
{
- if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
+ if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
ULP_BITMAP_SET(param->hdr_fp_bit.bits,
BNXT_ULP_HDR_BIT_T_VXLAN);
+ ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
+ }
}
/* Function to handle the parsing of RTE Flow item UDP Header. */
field = ulp_rte_parser_fld_copy(¶ms->hdr_field[idx],
&udp_spec->hdr.src_port,
size);
+
size = sizeof(udp_spec->hdr.dst_port);
field = ulp_rte_parser_fld_copy(field,
&udp_spec->hdr.dst_port,
/* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
0x00, 0x40, 0x11};
+ /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
+ const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
+ 0x00, 0x11, 0xf6};
struct ulp_rte_act_bitmap *act = ¶ms->act_bitmap;
struct ulp_rte_act_prop *ap = ¶ms->act_prop;
+ const uint8_t *tmp_buff;
vxlan_encap = action_item->conf;
if (!vxlan_encap) {
buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
ulp_encap_buffer_copy(buff,
eth_spec->dst.addr_bytes,
- BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC);
+ BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
+ ULP_BUFFER_ALIGN_8_BYTE);
buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
ulp_encap_buffer_copy(buff,
eth_spec->src.addr_bytes,
- BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC);
+ BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
+ ULP_BUFFER_ALIGN_8_BYTE);
/* Goto the next item */
if (!ulp_rte_item_skip_void(&item, 1))
buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
ulp_encap_buffer_copy(buff,
item->spec,
- sizeof(struct rte_flow_item_vlan));
+ sizeof(struct rte_vlan_hdr),
+ ULP_BUFFER_ALIGN_8_BYTE);
if (!ulp_rte_item_skip_void(&item, 1))
return BNXT_TF_RC_ERROR;
if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
vlan_num++;
memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
- sizeof(struct rte_flow_item_vlan)],
+ sizeof(struct rte_vlan_hdr)],
item->spec,
- sizeof(struct rte_flow_item_vlan));
+ sizeof(struct rte_vlan_hdr));
if (!ulp_rte_item_skip_void(&item, 1))
return BNXT_TF_RC_ERROR;
}
/* Update the vlan count and size of more than one */
if (vlan_num) {
- vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
+ vlan_size = vlan_num * sizeof(struct rte_vlan_hdr);
vlan_num = tfp_cpu_to_be_32(vlan_num);
memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG_NUM],
&vlan_num,
ulp_encap_buffer_copy(buff,
def_ipv4_hdr,
BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
- BNXT_ULP_ENCAP_IPV4_ID_PROTO);
+ BNXT_ULP_ENCAP_IPV4_ID_PROTO,
+ ULP_BUFFER_ALIGN_8_BYTE);
} else {
- const uint8_t *tmp_buff;
-
+ /* Total length being ignored in the ip hdr. */
buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
ulp_encap_buffer_copy(buff,
tmp_buff,
- BNXT_ULP_ENCAP_IPV4_ID_PROTO);
+ BNXT_ULP_ENCAP_IPV4_ID_PROTO,
+ ULP_BUFFER_ALIGN_8_BYTE);
buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
BNXT_ULP_ENCAP_IPV4_ID_PROTO];
ulp_encap_buffer_copy(buff,
&ipv4_spec->hdr.version_ihl,
- BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS);
+ BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
+ ULP_BUFFER_ALIGN_8_BYTE);
}
+
+ /* Update the dst ip address in ip encap buffer */
buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
BNXT_ULP_ENCAP_IPV4_ID_PROTO];
ulp_encap_buffer_copy(buff,
(const uint8_t *)&ipv4_spec->hdr.dst_addr,
- BNXT_ULP_ENCAP_IPV4_DEST_IP);
+ sizeof(ipv4_spec->hdr.dst_addr),
+ ULP_BUFFER_ALIGN_8_BYTE);
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
+ /* Update the src ip address */
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
+ BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
+ sizeof(ipv4_spec->hdr.src_addr)];
ulp_encap_buffer_copy(buff,
(const uint8_t *)&ipv4_spec->hdr.src_addr,
- BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC);
+ sizeof(ipv4_spec->hdr.src_addr),
+ ULP_BUFFER_ALIGN_8_BYTE);
/* Update the ip size details */
ip_size = tfp_cpu_to_be_32(ip_size);
ipv6_spec = item->spec;
ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
- /* copy the ipv4 details */
- memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP],
- ipv6_spec, BNXT_ULP_ENCAP_IPV6_SIZE);
+ /* copy the ipv6 details */
+ tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
+ if (ulp_buffer_is_empty(tmp_buff,
+ BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
+ ulp_encap_buffer_copy(buff,
+ def_ipv6_hdr,
+ sizeof(def_ipv6_hdr),
+ ULP_BUFFER_ALIGN_8_BYTE);
+ } else {
+ /* The payload length being ignored in the ip hdr. */
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
+ tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
+ ulp_encap_buffer_copy(buff,
+ tmp_buff,
+ BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
+ ULP_BUFFER_ALIGN_8_BYTE);
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
+ BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
+ BNXT_ULP_ENCAP_IPV6_DO];
+ tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
+ ulp_encap_buffer_copy(buff,
+ tmp_buff,
+ BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
+ ULP_BUFFER_ALIGN_8_BYTE);
+ }
+ /* Update the dst ip address in ip encap buffer */
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
+ sizeof(def_ipv6_hdr)];
+ ulp_encap_buffer_copy(buff,
+ (const uint8_t *)ipv6_spec->hdr.dst_addr,
+ sizeof(ipv6_spec->hdr.dst_addr),
+ ULP_BUFFER_ALIGN_8_BYTE);
+
+ /* Update the src ip address */
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
+ ulp_encap_buffer_copy(buff,
+ (const uint8_t *)ipv6_spec->hdr.src_addr,
+ sizeof(ipv6_spec->hdr.src_addr),
+ ULP_BUFFER_ALIGN_16_BYTE);
/* Update the ip size details */
ip_size = tfp_cpu_to_be_32(ip_size);
}
/* copy the udp details */
ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
- item->spec, BNXT_ULP_ENCAP_UDP_SIZE);
+ item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
+ ULP_BUFFER_ALIGN_8_BYTE);
if (!ulp_rte_item_skip_void(&item, 1))
return BNXT_TF_RC_ERROR;
BNXT_TF_DBG(ERR, "vxlan encap does not have vni\n");
return BNXT_TF_RC_ERROR;
}
- vxlan_size = sizeof(struct rte_flow_item_vxlan);
+ vxlan_size = sizeof(struct rte_vxlan_hdr);
/* copy the vxlan details */
memcpy(&vxlan_spec, item->spec, vxlan_size);
vxlan_spec.flags = 0x08;
- ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN],
- (const uint8_t *)&vxlan_spec,
- vxlan_size);
+ buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
+ if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
+ ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
+ vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
+ } else {
+ ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
+ vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
+ ulp_encap_buffer_copy(buff + (vxlan_size / 2),
+ (const uint8_t *)&vxlan_spec.vni,
+ vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
+ }
vxlan_size = tfp_cpu_to_be_32(vxlan_size);
memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
&vxlan_size, sizeof(uint32_t));
/* update the hdr_bitmap with vxlan */
ULP_BITMAP_SET(params->act_bitmap.bits,
BNXT_ULP_ACTION_BIT_VXLAN_DECAP);
+ /* Update computational field with tunnel decap info */
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN_DECAP, 1);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
return BNXT_TF_RC_SUCCESS;
}
ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_DEC_TTL);
return BNXT_TF_RC_SUCCESS;
}
+
+/* Function to handle the parsing of RTE Flow action JUMP */
+int32_t
+ulp_rte_jump_act_handler(const struct rte_flow_action *action_item __rte_unused,
+			 struct ulp_rte_parser_params *params)
+{
+	/* Update the act_bitmap with the JUMP action */
+	ULP_BITMAP_SET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);
+	return BNXT_TF_RC_SUCCESS;
+}