#include "ulp_template_struct.h"
#include "bnxt_ulp.h"
#include "bnxt_tf_common.h"
+#include "bnxt_tf_pmd_shim.h"
#include "ulp_rte_parser.h"
#include "ulp_matcher.h"
#include "ulp_utils.h"
return 0;
}
+/* Utility function to copy field spec items.
+ * Records 'size' into the field, copies 'size' bytes from 'buffer' into
+ * field->spec (assumes the spec buffer can hold 'size' bytes -- callers
+ * pass protocol field sizes), and returns a pointer to the next field
+ * so the caller can chain successive copies.
+ */
+static struct ulp_rte_hdr_field *
+ulp_rte_parser_fld_copy(struct ulp_rte_hdr_field *field,
+ const void *buffer,
+ uint32_t size)
+{
+ field->size = size;
+ memcpy(field->spec, buffer, field->size);
+ /* Advance to the next header field slot for chained copies */
+ field++;
+ return field;
+}
+
/* Utility function to update the field_bitmap */
static void
ulp_rte_parser_field_bitmap_update(struct ulp_rte_parser_params *params,
/* Parse all the items in the pattern */
while (item && item->type != RTE_FLOW_ITEM_TYPE_END) {
- /* get the header information from the flow_hdr_info table */
- hdr_info = &ulp_hdr_info[item->type];
+ if (item->type >= (uint32_t)
+ BNXT_RTE_FLOW_ITEM_TYPE_END) {
+ if (item->type >=
+ (uint32_t)BNXT_RTE_FLOW_ITEM_TYPE_LAST)
+ goto hdr_parser_error;
+ /* get the header information */
+ hdr_info = &ulp_vendor_hdr_info[item->type -
+ BNXT_RTE_FLOW_ITEM_TYPE_END];
+ } else {
+ if (item->type > RTE_FLOW_ITEM_TYPE_HIGIG2)
+ goto hdr_parser_error;
+ hdr_info = &ulp_hdr_info[item->type];
+ }
if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_NOT_SUPPORTED) {
- BNXT_TF_DBG(ERR,
- "Truflow parser does not support type %d\n",
- item->type);
- return BNXT_TF_RC_PARSE_ERR;
+ goto hdr_parser_error;
} else if (hdr_info->hdr_type == BNXT_ULP_HDR_TYPE_SUPPORTED) {
/* call the registered callback handler */
if (hdr_info->proto_hdr_func) {
}
/* update the implied SVIF */
return ulp_rte_parser_implicit_match_port_process(params);
+
+hdr_parser_error:
+ BNXT_TF_DBG(ERR, "Truflow parser does not support type %d\n",
+ item->type);
+ return BNXT_TF_RC_PARSE_ERR;
}
/*
/* Parse all the items in the pattern */
while (action_item && action_item->type != RTE_FLOW_ACTION_TYPE_END) {
- /* get the header information from the flow_hdr_info table */
- hdr_info = &ulp_act_info[action_item->type];
- if (hdr_info->act_type ==
- BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
- BNXT_TF_DBG(ERR,
- "Truflow parser does not support act %u\n",
- action_item->type);
- return BNXT_TF_RC_ERROR;
- } else if (hdr_info->act_type ==
- BNXT_ULP_ACT_TYPE_SUPPORTED) {
+ if (action_item->type >=
+ (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_END) {
+ if (action_item->type >=
+ (uint32_t)BNXT_RTE_FLOW_ACTION_TYPE_LAST)
+ goto act_parser_error;
+ /* get the header information from bnxt actinfo table */
+ hdr_info = &ulp_vendor_act_info[action_item->type -
+ BNXT_RTE_FLOW_ACTION_TYPE_END];
+ } else {
+ if (action_item->type > RTE_FLOW_ACTION_TYPE_SHARED)
+ goto act_parser_error;
+ /* get the header information from the act info table */
+ hdr_info = &ulp_act_info[action_item->type];
+ }
+ if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_NOT_SUPPORTED) {
+ goto act_parser_error;
+ } else if (hdr_info->act_type == BNXT_ULP_ACT_TYPE_SUPPORTED) {
/* call the registered callback handler */
if (hdr_info->proto_act_func) {
if (hdr_info->proto_act_func(action_item,
/* update the implied port details */
ulp_rte_parser_implicit_act_port_process(params);
return BNXT_TF_RC_SUCCESS;
+
+act_parser_error:
+ BNXT_TF_DBG(ERR, "Truflow parser does not support act %u\n",
+ action_item->type);
+ return BNXT_TF_RC_ERROR;
}
/*
/*
* Function to handle the post processing of the parsing details
*/
-int32_t
+void
bnxt_ulp_rte_parser_post_process(struct ulp_rte_parser_params *params)
{
+ /*
+ * Only the normal-flow post processing remains; the tunnel-flow
+ * post processing call was removed, so there is no status left to
+ * report and the function became void.
+ */
ulp_post_process_normal_flow(params);
- return ulp_post_process_tun_flow(params);
}
/*
static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
uint32_t ifindex,
- uint16_t mask)
+ uint16_t mask,
+ enum bnxt_ulp_direction_type item_dir)
{
uint16_t svif;
enum bnxt_ulp_direction_type dir;
bnxt_ulp_rte_parser_direction_compute(params);
/* Get the computed direction */
- dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
- if (dir == BNXT_ULP_DIR_INGRESS) {
+ dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
+ ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
+ if (dir == BNXT_ULP_DIR_INGRESS &&
+ port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
svif_type = BNXT_ULP_PHY_PORT_SVIF;
} else {
- if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
+ if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
+ item_dir != BNXT_ULP_DIR_EGRESS)
svif_type = BNXT_ULP_VF_FUNC_SVIF;
else
svif_type = BNXT_ULP_DRV_FUNC_SVIF;
}
/* Update the SVIF details */
- rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
+ rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
+ BNXT_ULP_DIR_INVALID);
return rc;
}
return BNXT_TF_RC_SUCCESS;
}
port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
+ action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
action_item.conf = &port_id;
/* Update the action port based on incoming port */
- ulp_rte_port_id_act_handler(&action_item, params);
+ ulp_rte_port_act_handler(&action_item, params);
/* Reset the action port set bit */
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
}
/* Update the SVIF details */
- return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
+ return ulp_rte_parser_svif_set(params, ifindex, svif_mask,
+ BNXT_ULP_DIR_INVALID);
}
/* Function to handle the parsing of RTE Flow item VF Header. */
return rc;
}
/* Update the SVIF details */
- return ulp_rte_parser_svif_set(params, ifindex, mask);
+ return ulp_rte_parser_svif_set(params, ifindex, mask,
+ BNXT_ULP_DIR_INVALID);
}
-/* Function to handle the parsing of RTE Flow item port id Header. */
+/* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
-ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
- struct ulp_rte_parser_params *params)
+ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
+ struct ulp_rte_parser_params *params)
{
+ enum bnxt_ulp_direction_type item_dir;
+ uint16_t ethdev_id;
uint16_t mask = 0;
int32_t rc = BNXT_TF_RC_PARSE_ERR;
uint32_t ifindex;
- if (!port_spec) {
- BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
+ if (!item->spec) {
+ BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
return rc;
}
- if (!port_mask) {
- BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
+ if (!item->mask) {
+ BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
+ return rc;
+ }
+
+ /*
+ * Pick the ethdev id, mask and direction from the item type:
+ * PORT_REPRESENTOR pins the match to the ingress direction and
+ * REPRESENTED_PORT to egress, while PORT_ID leaves the direction
+ * to be computed later (BNXT_ULP_DIR_INVALID).
+ */
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_PORT_ID: {
+ const struct rte_flow_item_port_id *port_spec = item->spec;
+ const struct rte_flow_item_port_id *port_mask = item->mask;
+
+ item_dir = BNXT_ULP_DIR_INVALID;
+ ethdev_id = port_spec->id;
+ mask = port_mask->id;
+ break;
+ }
+ case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
+ const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
+ const struct rte_flow_item_ethdev *ethdev_mask = item->mask;
+
+ item_dir = BNXT_ULP_DIR_INGRESS;
+ ethdev_id = ethdev_spec->port_id;
+ mask = ethdev_mask->port_id;
+ break;
+ }
+ case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
+ const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
+ const struct rte_flow_item_ethdev *ethdev_mask = item->mask;
+
+ item_dir = BNXT_ULP_DIR_EGRESS;
+ ethdev_id = ethdev_spec->port_id;
+ mask = ethdev_mask->port_id;
+ break;
+ }
+ default:
+ BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
return rc;
}
- mask = port_mask->id;
/* perform the conversion from dpdk port to bnxt ifindex */
if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
- port_spec->id,
+ ethdev_id,
&ifindex)) {
BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
return rc;
}
/* Update the SVIF details */
- return ulp_rte_parser_svif_set(params, ifindex, mask);
+ return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}
/* Function to handle the parsing of RTE Flow item phy port Header. */
{
const struct rte_flow_item_eth *eth_spec = item->spec;
const struct rte_flow_item_eth *eth_mask = item->mask;
- uint32_t idx = 0;
+ uint32_t idx = 0, dmac_idx = 0;
uint32_t size;
uint16_t eth_type = 0;
uint32_t inner_flag = 0;
* Copy the rte_flow_item for eth into hdr_field using ethernet
* header fields
*/
+ dmac_idx = idx;
size = sizeof(((struct rte_flow_item_eth *)NULL)->dst.addr_bytes);
ulp_rte_prsr_fld_mask(params, &idx, size,
ulp_deference_struct(eth_spec, dst.addr_bytes),
inner_flag = 1;
} else {
ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID,
+ dmac_idx);
}
/* Update the field protocol hdr bitmap */
ulp_rte_l2_proto_type_update(params, eth_type, inner_flag);
*/
ulp_rte_prsr_fld_mask(params, &idx, size,
&priority,
- &priority_mask,
+ (vlan_mask) ? &priority_mask : NULL,
ULP_PRSR_ACT_MASK_IGNORE);
ulp_rte_prsr_fld_mask(params, &idx, size,
&vlan_tag,
- &vlan_tag_mask,
+ (vlan_mask) ? &vlan_tag_mask : NULL,
ULP_PRSR_ACT_DEFAULT);
size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
ULP_BITMAP_SET(params->hdr_bitmap.bits,
BNXT_ULP_HDR_BIT_OO_VLAN);
+ if (vlan_mask && vlan_tag_mask)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);
+
} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
!ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
outer_vtag_num == 1) {
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
ULP_BITMAP_SET(params->hdr_bitmap.bits,
BNXT_ULP_HDR_BIT_OI_VLAN);
+ if (vlan_mask && vlan_tag_mask)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);
+
} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
!inner_vtag_num) {
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
ULP_BITMAP_SET(params->hdr_bitmap.bits,
BNXT_ULP_HDR_BIT_IO_VLAN);
+ if (vlan_mask && vlan_tag_mask)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
inner_flag = 1;
} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
ULP_BITMAP_SET(params->hdr_bitmap.bits,
BNXT_ULP_HDR_BIT_II_VLAN);
+ if (vlan_mask && vlan_tag_mask)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
inner_flag = 1;
} else {
BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
const struct rte_flow_item_ipv4 *ipv4_spec = item->spec;
const struct rte_flow_item_ipv4 *ipv4_mask = item->mask;
struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
- uint32_t idx = 0;
+ uint32_t idx = 0, dip_idx = 0;
uint32_t size;
uint8_t proto = 0;
uint32_t inner_flag = 0;
return BNXT_TF_RC_ERROR;
}
- if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
- BNXT_ULP_HDR_BIT_O_ETH) &&
- !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
- BNXT_ULP_HDR_BIT_I_ETH)) {
- /* Since F2 flow does not include eth item, when parser detects
- * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
- * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
- * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
- * This will allow the parser post processor to update the
- * t_dmac in hdr_field[o_eth.dmac]
- */
- idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
- BNXT_ULP_PROTO_HDR_VLAN_NUM);
- params->field_idx = idx;
- }
-
if (ulp_rte_prsr_fld_size_validate(params, &idx,
BNXT_ULP_PROTO_HDR_IPV4_NUM)) {
BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
ulp_deference_struct(ipv4_mask, hdr.src_addr),
ULP_PRSR_ACT_DEFAULT);
+ dip_idx = idx;
size = sizeof(((struct rte_flow_item_ipv4 *)NULL)->hdr.dst_addr);
ulp_rte_prsr_fld_mask(params, &idx, size,
ulp_deference_struct(ipv4_spec, hdr.dst_addr),
} else {
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV4);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
+ /* Update the tunnel offload dest ip offset */
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
+ dip_idx);
}
/* Some of the PMD applications may set the protocol field
const struct rte_flow_item_ipv6 *ipv6_spec = item->spec;
const struct rte_flow_item_ipv6 *ipv6_mask = item->mask;
struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
- uint32_t idx = 0;
+ uint32_t idx = 0, dip_idx = 0;
uint32_t size;
uint32_t ver_spec = 0, ver_mask = 0;
uint32_t tc_spec = 0, tc_mask = 0;
return BNXT_TF_RC_ERROR;
}
- if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
- BNXT_ULP_HDR_BIT_O_ETH) &&
- !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
- BNXT_ULP_HDR_BIT_I_ETH)) {
- /* Since F2 flow does not include eth item, when parser detects
- * IPv4/IPv6 item list and it belongs to the outer header; i.e.,
- * o_ipv4/o_ipv6, check if O_ETH and I_ETH is set. If not set,
- * then add offset sizeof(o_eth/oo_vlan/oi_vlan) to the index.
- * This will allow the parser post processor to update the
- * t_dmac in hdr_field[o_eth.dmac]
- */
- idx += (BNXT_ULP_PROTO_HDR_ETH_NUM +
- BNXT_ULP_PROTO_HDR_VLAN_NUM);
- params->field_idx = idx;
- }
-
if (ulp_rte_prsr_fld_size_validate(params, &idx,
BNXT_ULP_PROTO_HDR_IPV6_NUM)) {
BNXT_TF_DBG(ERR, "Error parsing protocol header\n");
ulp_rte_prsr_fld_mask(params, &idx, size, &ver_spec, &ver_mask,
ULP_PRSR_ACT_DEFAULT);
/*
- * The TC and flow label field are ignored since OVS is setting
- * it for match and it is not supported.
+ * The TC and flow label field are ignored since OVS is
+ * setting it for match and it is not supported.
* This is a work around and
* shall be addressed in the future.
*/
ulp_deference_struct(ipv6_mask, hdr.src_addr),
ULP_PRSR_ACT_DEFAULT);
+ dip_idx = idx;
size = sizeof(((struct rte_flow_item_ipv6 *)NULL)->hdr.dst_addr);
ulp_rte_prsr_fld_mask(params, &idx, size,
ulp_deference_struct(ipv6_spec, hdr.dst_addr),
} else {
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_IPV6);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3, 1);
+ /* Update the tunnel offload dest ip offset */
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID,
+ dip_idx);
}
/* Update the field protocol hdr bitmap */
/* Function to handle the update of proto header based on field values */
static void
-ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
- uint16_t dst_port)
+ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
+ uint16_t src_port, uint16_t src_mask,
+ uint16_t dst_port, uint16_t dst_mask,
+ enum bnxt_ulp_hdr_bit hdr_bit)
{
- if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN))
- ULP_BITMAP_SET(param->hdr_fp_bit.bits,
- BNXT_ULP_HDR_BIT_T_VXLAN);
+ /*
+ * Record the L4 header bit plus the computed port, port-mask,
+ * field-bitmap and protocol-id fields for either the inner or the
+ * outer header, selected by the hdr_bit the caller passed in.
+ * Ports and masks arrive in big-endian (wire) order.
+ */
+ switch (hdr_bit) {
+ case BNXT_ULP_HDR_BIT_I_UDP:
+ case BNXT_ULP_HDR_BIT_I_TCP:
+ ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
+ (uint64_t)rte_be_to_cpu_16(src_port));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
+ (uint64_t)rte_be_to_cpu_16(dst_port));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
+ (uint64_t)rte_be_to_cpu_16(src_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
+ (uint64_t)rte_be_to_cpu_16(dst_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
+ 1);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
+ !!(src_port & src_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
+ !!(dst_port & dst_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
+ (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
+ IPPROTO_UDP : IPPROTO_TCP);
+ break;
+ case BNXT_ULP_HDR_BIT_O_UDP:
+ case BNXT_ULP_HDR_BIT_O_TCP:
+ ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
+ (uint64_t)rte_be_to_cpu_16(src_port));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
+ (uint64_t)rte_be_to_cpu_16(dst_port));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
+ (uint64_t)rte_be_to_cpu_16(src_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
+ (uint64_t)rte_be_to_cpu_16(dst_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
+ 1);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
+ !!(src_port & src_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
+ !!(dst_port & dst_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
+ (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
+ IPPROTO_UDP : IPPROTO_TCP);
+ break;
+ default:
+ break;
+ }
- if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
- BNXT_ULP_HDR_BIT_T_VXLAN) ||
- ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
- BNXT_ULP_HDR_BIT_T_GRE))
- ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
+ /* An outer UDP dst port equal to the well-known VXLAN port marks
+ * the flow as a tunnel flow.
+ */
+ if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
+ tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
+ ULP_BITMAP_SET(params->hdr_fp_bit.bits,
+ BNXT_ULP_HDR_BIT_T_VXLAN);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
+ }
}
/* Function to handle the parsing of RTE Flow item UDP Header. */
uint32_t idx = 0;
uint32_t size;
uint16_t dport = 0, sport = 0;
+ uint16_t dport_mask = 0, sport_mask = 0;
uint32_t cnt;
+ enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;
cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
if (cnt == 2) {
sport = udp_spec->hdr.src_port;
dport = udp_spec->hdr.dst_port;
}
+ if (udp_mask) {
+ sport_mask = udp_mask->hdr.src_port;
+ dport_mask = udp_mask->hdr.dst_port;
+ }
if (ulp_rte_prsr_fld_size_validate(params, &idx,
BNXT_ULP_PROTO_HDR_UDP_NUM)) {
/* Set the udp header bitmap and computed l4 header bitmaps */
if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
- ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
- ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
- (uint32_t)rte_be_to_cpu_16(sport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
- (uint32_t)rte_be_to_cpu_16(dport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
- 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
- IPPROTO_UDP);
- if (udp_mask && udp_mask->hdr.src_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
- 1);
- if (udp_mask && udp_mask->hdr.dst_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
- 1);
- } else {
- ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
- (uint32_t)rte_be_to_cpu_16(sport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
- (uint32_t)rte_be_to_cpu_16(dport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
- 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
- IPPROTO_UDP);
- if (udp_mask && udp_mask->hdr.src_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
- 1);
- if (udp_mask && udp_mask->hdr.dst_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
- 1);
+ ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
+ out_l4 = BNXT_ULP_HDR_BIT_I_UDP;
- /* Update the field protocol hdr bitmap */
- ulp_rte_l4_proto_type_update(params, dport);
- }
+ ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
+ dport_mask, out_l4);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
return BNXT_TF_RC_SUCCESS;
}
struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
uint32_t idx = 0;
uint16_t dport = 0, sport = 0;
+ uint16_t dport_mask = 0, sport_mask = 0;
uint32_t size;
uint32_t cnt;
+ enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;
cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
if (cnt == 2) {
sport = tcp_spec->hdr.src_port;
dport = tcp_spec->hdr.dst_port;
}
+ if (tcp_mask) {
+ sport_mask = tcp_mask->hdr.src_port;
+ dport_mask = tcp_mask->hdr.dst_port;
+ }
if (ulp_rte_prsr_fld_size_validate(params, &idx,
BNXT_ULP_PROTO_HDR_TCP_NUM)) {
/* Set the udp header bitmap and computed l4 header bitmaps */
if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
- ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
- ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
- (uint32_t)rte_be_to_cpu_16(sport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
- (uint32_t)rte_be_to_cpu_16(dport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
- 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
- IPPROTO_TCP);
- if (tcp_mask && tcp_mask->hdr.src_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
- 1);
- if (tcp_mask && tcp_mask->hdr.dst_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
- 1);
- } else {
- ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
- (uint32_t)rte_be_to_cpu_16(sport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
- (uint32_t)rte_be_to_cpu_16(dport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
- 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
- IPPROTO_TCP);
- if (tcp_mask && tcp_mask->hdr.src_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
- 1);
- if (tcp_mask && tcp_mask->hdr.dst_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
- 1);
- }
+ ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
+ out_l4 = BNXT_ULP_HDR_BIT_I_TCP;
+
+ ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
+ dport_mask, out_l4);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
return BNXT_TF_RC_SUCCESS;
}
/* Update the hdr_bitmap with vxlan */
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
- ulp_rte_l4_proto_type_update(params, 0);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
return BNXT_TF_RC_SUCCESS;
}
/* Update the hdr_bitmap with GRE */
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
- ulp_rte_l4_proto_type_update(params, 0);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
return BNXT_TF_RC_SUCCESS;
}
return BNXT_TF_RC_SUCCESS;
}
+/* Handle the eth header of an encap action: copy the ethernet fields
+ * into the encap field list (params->enc_field) and flag O_ETH in the
+ * encap header bitmap.
+ */
+static void
+ulp_rte_enc_eth_hdr_handler(struct ulp_rte_parser_params *params,
+ const struct rte_flow_item_eth *eth_spec)
+{
+ struct ulp_rte_hdr_field *field;
+ uint32_t size;
+
+ /* Copy dmac, smac and ether type into consecutive enc fields */
+ field = &params->enc_field[BNXT_ULP_ENC_FIELD_ETH_DMAC];
+ size = sizeof(eth_spec->dst.addr_bytes);
+ field = ulp_rte_parser_fld_copy(field, eth_spec->dst.addr_bytes, size);
+
+ size = sizeof(eth_spec->src.addr_bytes);
+ field = ulp_rte_parser_fld_copy(field, eth_spec->src.addr_bytes, size);
+
+ size = sizeof(eth_spec->type);
+ field = ulp_rte_parser_fld_copy(field, &eth_spec->type, size);
+
+ ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_ETH);
+}
+
+/* Handle the vlan header of an encap action: copy the tci and inner
+ * ether type into the encap field list. 'inner' selects the outer (0)
+ * or inner (non-zero) vlan slots and the matching OO/OI vlan bit.
+ */
+static void
+ulp_rte_enc_vlan_hdr_handler(struct ulp_rte_parser_params *params,
+ const struct rte_flow_item_vlan *vlan_spec,
+ uint32_t inner)
+{
+ struct ulp_rte_hdr_field *field;
+ uint32_t size;
+
+ if (!inner) {
+ field = &params->enc_field[BNXT_ULP_ENC_FIELD_O_VLAN_TCI];
+ ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_OO_VLAN);
+ } else {
+ field = &params->enc_field[BNXT_ULP_ENC_FIELD_I_VLAN_TCI];
+ ULP_BITMAP_SET(params->enc_hdr_bitmap.bits,
+ BNXT_ULP_HDR_BIT_OI_VLAN);
+ }
+
+ size = sizeof(vlan_spec->tci);
+ field = ulp_rte_parser_fld_copy(field, &vlan_spec->tci, size);
+
+ size = sizeof(vlan_spec->inner_type);
+ field = ulp_rte_parser_fld_copy(field, &vlan_spec->inner_type, size);
+}
+
+/* Handle the ipv4 header of an encap action: copy the header fields
+ * into the encap field list, applying a default version/IHL byte and
+ * a default TTL when the spec leaves them zero.
+ */
+static void
+ulp_rte_enc_ipv4_hdr_handler(struct ulp_rte_parser_params *params,
+ const struct rte_flow_item_ipv4 *ip)
+{
+ struct ulp_rte_hdr_field *field;
+ uint32_t size;
+ uint8_t val8;
+
+ /* Default the version/IHL byte when the spec does not set it */
+ field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_IHL];
+ size = sizeof(ip->hdr.version_ihl);
+ if (!ip->hdr.version_ihl)
+ val8 = RTE_IPV4_VHL_DEF;
+ else
+ val8 = ip->hdr.version_ihl;
+ field = ulp_rte_parser_fld_copy(field, &val8, size);
+
+ size = sizeof(ip->hdr.type_of_service);
+ field = ulp_rte_parser_fld_copy(field, &ip->hdr.type_of_service, size);
+
+ size = sizeof(ip->hdr.packet_id);
+ field = ulp_rte_parser_fld_copy(field, &ip->hdr.packet_id, size);
+
+ size = sizeof(ip->hdr.fragment_offset);
+ field = ulp_rte_parser_fld_copy(field, &ip->hdr.fragment_offset, size);
+
+ /* Default the TTL when the spec does not set it */
+ size = sizeof(ip->hdr.time_to_live);
+ if (!ip->hdr.time_to_live)
+ val8 = BNXT_ULP_DEFAULT_TTL;
+ else
+ val8 = ip->hdr.time_to_live;
+ field = ulp_rte_parser_fld_copy(field, &val8, size);
+
+ size = sizeof(ip->hdr.next_proto_id);
+ field = ulp_rte_parser_fld_copy(field, &ip->hdr.next_proto_id, size);
+
+ size = sizeof(ip->hdr.src_addr);
+ field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
+
+ size = sizeof(ip->hdr.dst_addr);
+ field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
+
+ ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4);
+}
+
+/* Handle the ipv6 header of an encap action: copy the header fields
+ * into the encap field list, applying a default version/tc/flow-label
+ * word and a default hop limit when the spec leaves them zero.
+ */
+static void
+ulp_rte_enc_ipv6_hdr_handler(struct ulp_rte_parser_params *params,
+ const struct rte_flow_item_ipv6 *ip)
+{
+ struct ulp_rte_hdr_field *field;
+ uint32_t size;
+ uint32_t val32;
+ uint8_t val8;
+
+ /* Default the version/tc/flow-label word when the spec is zero */
+ field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_VTC_FLOW];
+ size = sizeof(ip->hdr.vtc_flow);
+ if (!ip->hdr.vtc_flow)
+ val32 = rte_cpu_to_be_32(BNXT_ULP_IPV6_DFLT_VER);
+ else
+ val32 = ip->hdr.vtc_flow;
+ field = ulp_rte_parser_fld_copy(field, &val32, size);
+
+ size = sizeof(ip->hdr.proto);
+ field = ulp_rte_parser_fld_copy(field, &ip->hdr.proto, size);
+
+ /* Default the hop limit when the spec does not set it */
+ size = sizeof(ip->hdr.hop_limits);
+ if (!ip->hdr.hop_limits)
+ val8 = BNXT_ULP_DEFAULT_TTL;
+ else
+ val8 = ip->hdr.hop_limits;
+ field = ulp_rte_parser_fld_copy(field, &val8, size);
+
+ size = sizeof(ip->hdr.src_addr);
+ field = ulp_rte_parser_fld_copy(field, &ip->hdr.src_addr, size);
+
+ size = sizeof(ip->hdr.dst_addr);
+ field = ulp_rte_parser_fld_copy(field, &ip->hdr.dst_addr, size);
+
+ ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV6);
+}
+
+/* Handle the udp header of an encap action: copy the ports into the
+ * encap field list and record UDP as the IP protocol.
+ */
+static void
+ulp_rte_enc_udp_hdr_handler(struct ulp_rte_parser_params *params,
+ const struct rte_flow_item_udp *udp_spec)
+{
+ struct ulp_rte_hdr_field *field;
+ uint32_t size;
+ uint8_t type = IPPROTO_UDP;
+
+ field = &params->enc_field[BNXT_ULP_ENC_FIELD_UDP_SPORT];
+ size = sizeof(udp_spec->hdr.src_port);
+ field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.src_port, size);
+
+ size = sizeof(udp_spec->hdr.dst_port);
+ field = ulp_rte_parser_fld_copy(field, &udp_spec->hdr.dst_port, size);
+
+ ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_UDP);
+
+ /* Update the IP header protocol; both the IPv4 and IPv6 protocol
+ * enc fields are written since the outer L3 type is not known here.
+ */
+ field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV4_PROTO];
+ ulp_rte_parser_fld_copy(field, &type, sizeof(type));
+ field = &params->enc_field[BNXT_ULP_ENC_FIELD_IPV6_PROTO];
+ ulp_rte_parser_fld_copy(field, &type, sizeof(type));
+}
+
+/* Handle the vxlan header of an encap action: copy the flags, the vni
+ * and the reserved bytes into the encap field list and flag T_VXLAN in
+ * the encap header bitmap.
+ */
+static void
+ulp_rte_enc_vxlan_hdr_handler(struct ulp_rte_parser_params *params,
+ struct rte_flow_item_vxlan *vxlan_spec)
+{
+ struct ulp_rte_hdr_field *field;
+ uint32_t size;
+
+ field = &params->enc_field[BNXT_ULP_ENC_FIELD_VXLAN_FLAGS];
+ size = sizeof(vxlan_spec->flags);
+ field = ulp_rte_parser_fld_copy(field, &vxlan_spec->flags, size);
+
+ size = sizeof(vxlan_spec->rsvd0);
+ field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd0, size);
+
+ size = sizeof(vxlan_spec->vni);
+ field = ulp_rte_parser_fld_copy(field, &vxlan_spec->vni, size);
+
+ size = sizeof(vxlan_spec->rsvd1);
+ field = ulp_rte_parser_fld_copy(field, &vxlan_spec->rsvd1, size);
+
+ ULP_BITMAP_SET(params->enc_hdr_bitmap.bits, BNXT_ULP_HDR_BIT_T_VXLAN);
+}
+
/* Function to handle the parsing of RTE Flow action vxlan_encap Header. */
int32_t
ulp_rte_vxlan_encap_act_handler(const struct rte_flow_action *action_item,
{
const struct rte_flow_action_vxlan_encap *vxlan_encap;
const struct rte_flow_item *item;
- const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_ipv4 *ipv4_spec;
const struct rte_flow_item_ipv6 *ipv6_spec;
struct rte_flow_item_vxlan vxlan_spec;
uint32_t vlan_num = 0, vlan_size = 0;
uint32_t ip_size = 0, ip_type = 0;
uint32_t vxlan_size = 0;
- uint8_t *buff;
- /* IP header per byte - ver/hlen, TOS, ID, ID, FRAG, FRAG, TTL, PROTO */
- const uint8_t def_ipv4_hdr[] = {0x45, 0x00, 0x00, 0x01, 0x00,
- 0x00, 0x40, 0x11};
- /* IPv6 header per byte - vtc-flow,flow,zero,nexthdr-ttl */
- const uint8_t def_ipv6_hdr[] = {0x60, 0x00, 0x00, 0x01, 0x00,
- 0x00, 0x11, 0xf6};
struct ulp_rte_act_bitmap *act = &params->act_bitmap;
struct ulp_rte_act_prop *ap = &params->act_prop;
- const uint8_t *tmp_buff;
vxlan_encap = action_item->conf;
if (!vxlan_encap) {
BNXT_TF_DBG(ERR, "Parse Error:vxlan encap does not have eth\n");
return BNXT_TF_RC_ERROR;
}
- eth_spec = item->spec;
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_DMAC];
- ulp_encap_buffer_copy(buff,
- eth_spec->dst.addr_bytes,
- BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_DMAC,
- ULP_BUFFER_ALIGN_8_BYTE);
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_L2_SMAC];
- ulp_encap_buffer_copy(buff,
- eth_spec->src.addr_bytes,
- BNXT_ULP_ACT_PROP_SZ_ENCAP_L2_SMAC,
- ULP_BUFFER_ALIGN_8_BYTE);
+ /* Parse the ethernet header */
+ if (item->spec)
+ ulp_rte_enc_eth_hdr_handler(params, item->spec);
/* Goto the next item */
if (!ulp_rte_item_skip_void(&item, 1))
/* May have vlan header */
if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
vlan_num++;
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG];
- ulp_encap_buffer_copy(buff,
- item->spec,
- sizeof(struct rte_flow_item_vlan),
- ULP_BUFFER_ALIGN_8_BYTE);
+ if (item->spec)
+ ulp_rte_enc_vlan_hdr_handler(params, item->spec, 0);
if (!ulp_rte_item_skip_void(&item, 1))
return BNXT_TF_RC_ERROR;
/* may have two vlan headers */
if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
vlan_num++;
- memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_VTAG +
- sizeof(struct rte_flow_item_vlan)],
- item->spec,
- sizeof(struct rte_flow_item_vlan));
+ if (item->spec)
+ ulp_rte_enc_vlan_hdr_handler(params, item->spec, 1);
+
if (!ulp_rte_item_skip_void(&item, 1))
return BNXT_TF_RC_ERROR;
}
+
/* Update the vlan count and size of more than one */
if (vlan_num) {
vlan_size = vlan_num * sizeof(struct rte_flow_item_vlan);
ipv4_spec = item->spec;
ip_size = BNXT_ULP_ENCAP_IPV4_SIZE;
- /* copy the ipv4 details */
- if (ulp_buffer_is_empty(&ipv4_spec->hdr.version_ihl,
- BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS)) {
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
- ulp_encap_buffer_copy(buff,
- def_ipv4_hdr,
- BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
- BNXT_ULP_ENCAP_IPV4_ID_PROTO,
- ULP_BUFFER_ALIGN_8_BYTE);
- } else {
- /* Total length being ignored in the ip hdr. */
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
- tmp_buff = (const uint8_t *)&ipv4_spec->hdr.packet_id;
- ulp_encap_buffer_copy(buff,
- tmp_buff,
- BNXT_ULP_ENCAP_IPV4_ID_PROTO,
- ULP_BUFFER_ALIGN_8_BYTE);
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
- BNXT_ULP_ENCAP_IPV4_ID_PROTO];
- ulp_encap_buffer_copy(buff,
- &ipv4_spec->hdr.version_ihl,
- BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS,
- ULP_BUFFER_ALIGN_8_BYTE);
- }
-
- /* Update the dst ip address in ip encap buffer */
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
- BNXT_ULP_ENCAP_IPV4_VER_HLEN_TOS +
- BNXT_ULP_ENCAP_IPV4_ID_PROTO];
- ulp_encap_buffer_copy(buff,
- (const uint8_t *)&ipv4_spec->hdr.dst_addr,
- sizeof(ipv4_spec->hdr.dst_addr),
- ULP_BUFFER_ALIGN_8_BYTE);
-
- /* Update the src ip address */
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC +
- BNXT_ULP_ACT_PROP_SZ_ENCAP_IP_SRC -
- sizeof(ipv4_spec->hdr.src_addr)];
- ulp_encap_buffer_copy(buff,
- (const uint8_t *)&ipv4_spec->hdr.src_addr,
- sizeof(ipv4_spec->hdr.src_addr),
- ULP_BUFFER_ALIGN_8_BYTE);
-
/* Update the ip size details */
ip_size = tfp_cpu_to_be_32(ip_size);
memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
/* update the computed field to notify it is ipv4 header */
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV4_FLAG,
1);
+ if (ipv4_spec)
+ ulp_rte_enc_ipv4_hdr_handler(params, ipv4_spec);
if (!ulp_rte_item_skip_void(&item, 1))
return BNXT_TF_RC_ERROR;
ipv6_spec = item->spec;
ip_size = BNXT_ULP_ENCAP_IPV6_SIZE;
- /* copy the ipv6 details */
- tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
- if (ulp_buffer_is_empty(tmp_buff,
- BNXT_ULP_ENCAP_IPV6_VTC_FLOW)) {
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
- ulp_encap_buffer_copy(buff,
- def_ipv6_hdr,
- sizeof(def_ipv6_hdr),
- ULP_BUFFER_ALIGN_8_BYTE);
- } else {
- /* The payload length being ignored in the ip hdr. */
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP];
- tmp_buff = (const uint8_t *)&ipv6_spec->hdr.proto;
- ulp_encap_buffer_copy(buff,
- tmp_buff,
- BNXT_ULP_ENCAP_IPV6_PROTO_TTL,
- ULP_BUFFER_ALIGN_8_BYTE);
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
- BNXT_ULP_ENCAP_IPV6_PROTO_TTL +
- BNXT_ULP_ENCAP_IPV6_DO];
- tmp_buff = (const uint8_t *)&ipv6_spec->hdr.vtc_flow;
- ulp_encap_buffer_copy(buff,
- tmp_buff,
- BNXT_ULP_ENCAP_IPV6_VTC_FLOW,
- ULP_BUFFER_ALIGN_8_BYTE);
- }
- /* Update the dst ip address in ip encap buffer */
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP +
- sizeof(def_ipv6_hdr)];
- ulp_encap_buffer_copy(buff,
- (const uint8_t *)ipv6_spec->hdr.dst_addr,
- sizeof(ipv6_spec->hdr.dst_addr),
- ULP_BUFFER_ALIGN_8_BYTE);
-
- /* Update the src ip address */
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SRC];
- ulp_encap_buffer_copy(buff,
- (const uint8_t *)ipv6_spec->hdr.src_addr,
- sizeof(ipv6_spec->hdr.src_addr),
- ULP_BUFFER_ALIGN_16_BYTE);
-
/* Update the ip size details */
ip_size = tfp_cpu_to_be_32(ip_size);
memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_IP_SZ],
/* update the computed field to notify it is ipv6 header */
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_ENCAP_IPV6_FLAG,
1);
+ if (ipv6_spec)
+ ulp_rte_enc_ipv6_hdr_handler(params, ipv6_spec);
if (!ulp_rte_item_skip_void(&item, 1))
return BNXT_TF_RC_ERROR;
BNXT_TF_DBG(ERR, "vxlan encap does not have udp\n");
return BNXT_TF_RC_ERROR;
}
- /* copy the udp details */
- ulp_encap_buffer_copy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_UDP],
- item->spec, BNXT_ULP_ENCAP_UDP_SIZE,
- ULP_BUFFER_ALIGN_8_BYTE);
+ if (item->spec)
+ ulp_rte_enc_udp_hdr_handler(params, item->spec);
if (!ulp_rte_item_skip_void(&item, 1))
return BNXT_TF_RC_ERROR;
/* copy the vxlan details */
memcpy(&vxlan_spec, item->spec, vxlan_size);
vxlan_spec.flags = 0x08;
- buff = &ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN];
- if (ip_type == rte_cpu_to_be_32(BNXT_ULP_ETH_IPV4)) {
- ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
- vxlan_size, ULP_BUFFER_ALIGN_8_BYTE);
- } else {
- ulp_encap_buffer_copy(buff, (const uint8_t *)&vxlan_spec,
- vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
- ulp_encap_buffer_copy(buff + (vxlan_size / 2),
- (const uint8_t *)&vxlan_spec.vni,
- vxlan_size / 2, ULP_BUFFER_ALIGN_8_BYTE);
- }
vxlan_size = tfp_cpu_to_be_32(vxlan_size);
memcpy(&ap->act_details[BNXT_ULP_ACT_PROP_IDX_ENCAP_TUN_SZ],
&vxlan_size, sizeof(uint32_t));
+ ulp_rte_enc_vxlan_hdr_handler(params, &vxlan_spec);
+
/* update the hdr_bitmap with vxlan */
ULP_BITMAP_SET(act->bits, BNXT_ULP_ACT_BIT_VXLAN_ENCAP);
return BNXT_TF_RC_SUCCESS;
act_count = action_item->conf;
if (act_count) {
- if (act_count->shared) {
- BNXT_TF_DBG(ERR,
- "Parse Error:Shared count not supported\n");
- return BNXT_TF_RC_PARSE_ERR;
- }
memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
&act_count->id,
BNXT_ULP_ACT_PROP_SZ_COUNT);
/* Function to handle the parsing of action ports. */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
- uint32_t ifindex)
+ uint32_t ifindex,
+ enum bnxt_ulp_direction_type act_dir)
{
enum bnxt_ulp_direction_type dir;
uint16_t pid_s;
uint32_t vnic_type;
/* Get the direction */
- dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
- if (dir == BNXT_ULP_DIR_EGRESS) {
+ /* If action implicitly specifies direction, use the specification. */
+ dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
+ ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
+ act_dir;
+ port_type = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
+ if (dir == BNXT_ULP_DIR_EGRESS &&
+ port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
/* For egress direction, fill vport */
if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
return BNXT_TF_RC_ERROR;
&pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
} else {
/* For ingress direction, fill vnic */
- port_type = ULP_COMP_FLD_IDX_RD(param,
- BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
- if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
+ /*
+ * Action Destination
+ * ------------------------------------
+ * PORT_REPRESENTOR Driver Function
+ * ------------------------------------
+ * REPRESENTED_PORT VF
+ * ------------------------------------
+ * PORT_ID VF
+ */
+ if (act_dir != BNXT_ULP_DIR_INGRESS &&
+ port_type == BNXT_ULP_INTF_TYPE_VF_REP)
vnic_type = BNXT_ULP_VF_FUNC_VNIC;
else
vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
}
/* Update the action properties */
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
- return ulp_rte_parser_act_port_set(params, ifindex);
+ return ulp_rte_parser_act_port_set(params, ifindex,
+ BNXT_ULP_DIR_INVALID);
}
/* Function to handle the parsing of RTE Flow action VF. */
return BNXT_TF_RC_PARSE_ERR;
}
- bp = bnxt_get_bp(params->port_id);
+ bp = bnxt_pmd_get_bp(params->port_id);
if (bp == NULL) {
BNXT_TF_DBG(ERR, "Invalid bp\n");
return BNXT_TF_RC_ERROR;
/* Update the action properties */
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
- return ulp_rte_parser_act_port_set(params, ifindex);
+ return ulp_rte_parser_act_port_set(params, ifindex,
+ BNXT_ULP_DIR_INVALID);
}
-/* Function to handle the parsing of RTE Flow action port_id. */
+/* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
-ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
- struct ulp_rte_parser_params *param)
+ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
+ struct ulp_rte_parser_params *param)
{
- const struct rte_flow_action_port_id *port_id = act_item->conf;
+ uint32_t ethdev_id;
uint32_t ifindex;
enum bnxt_ulp_intf_type intf_type;
+ enum bnxt_ulp_direction_type act_dir;
- if (!port_id) {
+ if (!act_item->conf) {
BNXT_TF_DBG(ERR,
"ParseErr: Invalid Argument\n");
return BNXT_TF_RC_PARSE_ERR;
}
- if (port_id->original) {
- BNXT_TF_DBG(ERR,
- "ParseErr:Portid Original not supported\n");
- return BNXT_TF_RC_PARSE_ERR;
+ switch (act_item->type) {
+ case RTE_FLOW_ACTION_TYPE_PORT_ID: {
+ const struct rte_flow_action_port_id *port_id = act_item->conf;
+
+ if (port_id->original) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:Portid Original not supported\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
+ ethdev_id = port_id->id;
+ act_dir = BNXT_ULP_DIR_INVALID;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
+ const struct rte_flow_action_ethdev *ethdev = act_item->conf;
+
+ ethdev_id = ethdev->port_id;
+ act_dir = BNXT_ULP_DIR_INGRESS;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
+ const struct rte_flow_action_ethdev *ethdev = act_item->conf;
+
+ ethdev_id = ethdev->port_id;
+ act_dir = BNXT_ULP_DIR_EGRESS;
+ break;
+ }
+ default:
+ BNXT_TF_DBG(ERR, "Unknown port action\n");
+ return BNXT_TF_RC_ERROR;
}
/* Get the port db ifindex */
- if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
+ if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
&ifindex)) {
BNXT_TF_DBG(ERR, "Invalid port id\n");
return BNXT_TF_RC_ERROR;
/* Set the action port */
ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
- return ulp_rte_parser_act_port_set(param, ifindex);
+ return ulp_rte_parser_act_port_set(param, ifindex, act_dir);
}
/* Function to handle the parsing of RTE Flow action phy_port. */
return ret;
}
+
+/* Function to handle the parsing of bnxt vendor Flow action vxlan Header. */
+/* Marks the flow with the F1 header bit in the header bitmap, then
+ * delegates all remaining processing to the standard vxlan decap
+ * action handler.
+ */
+int32_t
+ulp_vendor_vxlan_decap_act_handler(const struct rte_flow_action *action_item,
+ struct ulp_rte_parser_params *params)
+{
+ /* Set the F1 flow header bit */
+ ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);
+ return ulp_rte_vxlan_decap_act_handler(action_item, params);
+}
+
+/* Function to handle the parsing of bnxt vendor Flow item vxlan Header. */
+/* Marks the flow with the F2 header bit in the header bitmap, then
+ * reuses the vxlan decap action handler.  The item argument itself is
+ * unused; NULL is passed deliberately as the action_item since only the
+ * params-side bookkeeping of the decap handler is required here.
+ */
+int32_t
+ulp_rte_vendor_vxlan_decap_hdr_handler(const struct rte_flow_item *item,
+ struct ulp_rte_parser_params *params)
+{
+ RTE_SET_USED(item);
+ /* Set the F2 flow header bit */
+ ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F2);
+ return ulp_rte_vxlan_decap_act_handler(NULL, params);
+}