static int32_t
ulp_rte_parser_svif_set(struct ulp_rte_parser_params *params,
uint32_t ifindex,
- uint16_t mask)
+ uint16_t mask,
+ enum bnxt_ulp_direction_type item_dir)
{
uint16_t svif;
enum bnxt_ulp_direction_type dir;
bnxt_ulp_rte_parser_direction_compute(params);
/* Use the item-specified direction if valid, else the computed one */
- dir = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
- if (dir == BNXT_ULP_DIR_INGRESS) {
+ dir = (item_dir != BNXT_ULP_DIR_INVALID) ? item_dir :
+ ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_DIRECTION);
+ if (dir == BNXT_ULP_DIR_INGRESS &&
+ port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
svif_type = BNXT_ULP_PHY_PORT_SVIF;
} else {
- if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
+ if (port_type == BNXT_ULP_INTF_TYPE_VF_REP &&
+ item_dir != BNXT_ULP_DIR_EGRESS)
svif_type = BNXT_ULP_VF_FUNC_SVIF;
else
svif_type = BNXT_ULP_DRV_FUNC_SVIF;
}
/* Update the SVIF details */
- rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask);
+ rc = ulp_rte_parser_svif_set(params, ifindex, svif_mask,
+ BNXT_ULP_DIR_INVALID);
return rc;
}
return BNXT_TF_RC_SUCCESS;
}
port_id.id = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_INCOMING_IF);
+ action_item.type = RTE_FLOW_ACTION_TYPE_PORT_ID;
action_item.conf = &port_id;
/* Update the action port based on incoming port */
- ulp_rte_port_id_act_handler(&action_item, params);
+ ulp_rte_port_act_handler(&action_item, params);
/* Reset the action port set bit */
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_IS_SET, 0);
}
/* Update the SVIF details */
- return ulp_rte_parser_svif_set(params, ifindex, svif_mask);
+ return ulp_rte_parser_svif_set(params, ifindex, svif_mask,
+ BNXT_ULP_DIR_INVALID);
}
/* Function to handle the parsing of RTE Flow item VF Header. */
return rc;
}
/* Update the SVIF details */
- return ulp_rte_parser_svif_set(params, ifindex, mask);
+ return ulp_rte_parser_svif_set(params, ifindex, mask,
+ BNXT_ULP_DIR_INVALID);
}
-/* Function to handle the parsing of RTE Flow item port id Header. */
+/* Parse items PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
-ulp_rte_port_id_hdr_handler(const struct rte_flow_item *item,
- struct ulp_rte_parser_params *params)
+ulp_rte_port_hdr_handler(const struct rte_flow_item *item,
+ struct ulp_rte_parser_params *params)
{
- const struct rte_flow_item_port_id *port_spec = item->spec;
- const struct rte_flow_item_port_id *port_mask = item->mask;
+ enum bnxt_ulp_direction_type item_dir;
+ uint16_t ethdev_id;
uint16_t mask = 0;
int32_t rc = BNXT_TF_RC_PARSE_ERR;
uint32_t ifindex;
- if (!port_spec) {
- BNXT_TF_DBG(ERR, "ParseErr:Port id is not valid\n");
+ if (!item->spec) {
+ BNXT_TF_DBG(ERR, "ParseErr:Port spec is not valid\n");
return rc;
}
- if (!port_mask) {
- BNXT_TF_DBG(ERR, "ParseErr:Phy Port mask is not valid\n");
+ if (!item->mask) {
+ BNXT_TF_DBG(ERR, "ParseErr:Port mask is not valid\n");
+ return rc;
+ }
+
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_PORT_ID: {
+ const struct rte_flow_item_port_id *port_spec = item->spec;
+ const struct rte_flow_item_port_id *port_mask = item->mask;
+
+ item_dir = BNXT_ULP_DIR_INVALID;
+ ethdev_id = port_spec->id;
+ mask = port_mask->id;
+ break;
+ }
+ case RTE_FLOW_ITEM_TYPE_PORT_REPRESENTOR: {
+ const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
+ const struct rte_flow_item_ethdev *ethdev_mask = item->mask;
+
+ item_dir = BNXT_ULP_DIR_INGRESS;
+ ethdev_id = ethdev_spec->port_id;
+ mask = ethdev_mask->port_id;
+ break;
+ }
+ case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT: {
+ const struct rte_flow_item_ethdev *ethdev_spec = item->spec;
+ const struct rte_flow_item_ethdev *ethdev_mask = item->mask;
+
+ item_dir = BNXT_ULP_DIR_EGRESS;
+ ethdev_id = ethdev_spec->port_id;
+ mask = ethdev_mask->port_id;
+ break;
+ }
+ default:
+ BNXT_TF_DBG(ERR, "ParseErr:Unexpected item\n");
return rc;
}
- mask = port_mask->id;
/* perform the conversion from dpdk port to bnxt ifindex */
if (ulp_port_db_dev_port_to_ulp_index(params->ulp_ctx,
- port_spec->id,
+ ethdev_id,
&ifindex)) {
BNXT_TF_DBG(ERR, "ParseErr:Portid is not valid\n");
return rc;
}
/* Update the SVIF details */
- return ulp_rte_parser_svif_set(params, ifindex, mask);
+ return ulp_rte_parser_svif_set(params, ifindex, mask, item_dir);
}
/* Function to handle the parsing of RTE Flow item phy port Header. */
*/
ulp_rte_prsr_fld_mask(params, &idx, size,
&priority,
- &priority_mask,
+ (vlan_mask) ? &priority_mask : NULL,
ULP_PRSR_ACT_MASK_IGNORE);
ulp_rte_prsr_fld_mask(params, &idx, size,
&vlan_tag,
- &vlan_tag_mask,
+ (vlan_mask) ? &vlan_tag_mask : NULL,
ULP_PRSR_ACT_DEFAULT);
size = sizeof(((struct rte_flow_item_vlan *)NULL)->inner_type);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 1);
ULP_BITMAP_SET(params->hdr_bitmap.bits,
BNXT_ULP_HDR_BIT_OO_VLAN);
+ if (vlan_mask && vlan_tag_mask)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_OO_VLAN_FB_VID, 1);
+
} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
!ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
outer_vtag_num == 1) {
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_ONE_VTAG, 0);
ULP_BITMAP_SET(params->hdr_bitmap.bits,
BNXT_ULP_HDR_BIT_OI_VLAN);
+ if (vlan_mask && vlan_tag_mask)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_OI_VLAN_FB_VID, 1);
+
} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
!inner_vtag_num) {
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 1);
ULP_BITMAP_SET(params->hdr_bitmap.bits,
BNXT_ULP_HDR_BIT_IO_VLAN);
+ if (vlan_mask && vlan_tag_mask)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_IO_VLAN_FB_VID, 1);
inner_flag = 1;
} else if (ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_O_ETH) &&
ULP_BITMAP_ISSET(hdr_bit->bits, BNXT_ULP_HDR_BIT_I_ETH) &&
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_ONE_VTAG, 0);
ULP_BITMAP_SET(params->hdr_bitmap.bits,
BNXT_ULP_HDR_BIT_II_VLAN);
+ if (vlan_mask && vlan_tag_mask)
+ ULP_COMP_FLD_IDX_WR(params,
+ BNXT_ULP_CF_IDX_II_VLAN_FB_VID, 1);
inner_flag = 1;
} else {
BNXT_TF_DBG(ERR, "Error Parsing:Vlan hdr found without eth\n");
/* Function to handle the update of proto header based on field values */
static void
-ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *param,
- uint16_t dst_port)
-{
- if (dst_port == tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
- ULP_BITMAP_SET(param->hdr_fp_bit.bits,
- BNXT_ULP_HDR_BIT_T_VXLAN);
- ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
+ulp_rte_l4_proto_type_update(struct ulp_rte_parser_params *params,
+ uint16_t src_port, uint16_t src_mask,
+ uint16_t dst_port, uint16_t dst_mask,
+ enum bnxt_ulp_hdr_bit hdr_bit)
+{
+ switch (hdr_bit) {
+ case BNXT_ULP_HDR_BIT_I_UDP:
+ case BNXT_ULP_HDR_BIT_I_TCP:
+ ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
+ (uint64_t)rte_be_to_cpu_16(src_port));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
+ (uint64_t)rte_be_to_cpu_16(dst_port));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT_MASK,
+ (uint64_t)rte_be_to_cpu_16(src_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT_MASK,
+ (uint64_t)rte_be_to_cpu_16(dst_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
+ 1);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
+ !!(src_port & src_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
+ !!(dst_port & dst_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
+ (hdr_bit == BNXT_ULP_HDR_BIT_I_UDP) ?
+ IPPROTO_UDP : IPPROTO_TCP);
+ break;
+ case BNXT_ULP_HDR_BIT_O_UDP:
+ case BNXT_ULP_HDR_BIT_O_TCP:
+ ULP_BITMAP_SET(params->hdr_bitmap.bits, hdr_bit);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
+ (uint64_t)rte_be_to_cpu_16(src_port));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
+ (uint64_t)rte_be_to_cpu_16(dst_port));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT_MASK,
+ (uint64_t)rte_be_to_cpu_16(src_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT_MASK,
+ (uint64_t)rte_be_to_cpu_16(dst_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
+ 1);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
+ !!(src_port & src_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
+ !!(dst_port & dst_mask));
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
+ (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP) ?
+ IPPROTO_UDP : IPPROTO_TCP);
+ break;
+ default:
+ break;
}
- if (ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
- BNXT_ULP_HDR_BIT_T_VXLAN) ||
- ULP_BITMAP_ISSET(param->hdr_bitmap.bits,
- BNXT_ULP_HDR_BIT_T_GRE))
- ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_L3_TUN, 1);
+ if (hdr_bit == BNXT_ULP_HDR_BIT_O_UDP && dst_port ==
+ tfp_cpu_to_be_16(ULP_UDP_PORT_VXLAN)) {
+ ULP_BITMAP_SET(params->hdr_fp_bit.bits,
+ BNXT_ULP_HDR_BIT_T_VXLAN);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
+ }
}
/* Function to handle the parsing of RTE Flow item UDP Header. */
uint32_t idx = 0;
uint32_t size;
uint16_t dport = 0, sport = 0;
+ uint16_t dport_mask = 0, sport_mask = 0;
uint32_t cnt;
+ enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_UDP;
cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
if (cnt == 2) {
sport = udp_spec->hdr.src_port;
dport = udp_spec->hdr.dst_port;
}
+ if (udp_mask) {
+ sport_mask = udp_mask->hdr.src_port;
+ dport_mask = udp_mask->hdr.dst_port;
+ }
if (ulp_rte_prsr_fld_size_validate(params, &idx,
BNXT_ULP_PROTO_HDR_UDP_NUM)) {
/* Set the udp header bitmap and computed l4 header bitmaps */
if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
- ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
- ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_UDP);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
- (uint32_t)rte_be_to_cpu_16(sport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
- (uint32_t)rte_be_to_cpu_16(dport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
- 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
- IPPROTO_UDP);
- if (udp_mask && udp_mask->hdr.src_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
- 1);
- if (udp_mask && udp_mask->hdr.dst_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
- 1);
- } else {
- ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
- (uint32_t)rte_be_to_cpu_16(sport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
- (uint32_t)rte_be_to_cpu_16(dport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
- 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
- IPPROTO_UDP);
- if (udp_mask && udp_mask->hdr.src_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
- 1);
- if (udp_mask && udp_mask->hdr.dst_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
- 1);
+ ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
+ out_l4 = BNXT_ULP_HDR_BIT_I_UDP;
- /* Update the field protocol hdr bitmap */
- ulp_rte_l4_proto_type_update(params, dport);
- }
+ ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
+ dport_mask, out_l4);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
return BNXT_TF_RC_SUCCESS;
}
struct ulp_rte_hdr_bitmap *hdr_bitmap = ¶ms->hdr_bitmap;
uint32_t idx = 0;
uint16_t dport = 0, sport = 0;
+ uint16_t dport_mask = 0, sport_mask = 0;
uint32_t size;
uint32_t cnt;
+ enum bnxt_ulp_hdr_bit out_l4 = BNXT_ULP_HDR_BIT_O_TCP;
cnt = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L4_HDR_CNT);
if (cnt == 2) {
sport = tcp_spec->hdr.src_port;
dport = tcp_spec->hdr.dst_port;
}
+ if (tcp_mask) {
+ sport_mask = tcp_mask->hdr.src_port;
+ dport_mask = tcp_mask->hdr.dst_port;
+ }
if (ulp_rte_prsr_fld_size_validate(params, &idx,
BNXT_ULP_PROTO_HDR_TCP_NUM)) {
/* Set the tcp header bitmap and computed l4 header bitmaps */
if (ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_UDP) ||
- ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP)) {
- ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_I_TCP);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_SRC_PORT,
- (uint32_t)rte_be_to_cpu_16(sport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L4_DST_PORT,
- (uint32_t)rte_be_to_cpu_16(dport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_FB_PROTO_ID,
- 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_I_L3_PROTO_ID,
- IPPROTO_TCP);
- if (tcp_mask && tcp_mask->hdr.src_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_I_L4_FB_SRC_PORT,
- 1);
- if (tcp_mask && tcp_mask->hdr.dst_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_I_L4_FB_DST_PORT,
- 1);
- } else {
- ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4, 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_SRC_PORT,
- (uint32_t)rte_be_to_cpu_16(sport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L4_DST_PORT,
- (uint32_t)rte_be_to_cpu_16(dport));
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_FB_PROTO_ID,
- 1);
- ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_O_L3_PROTO_ID,
- IPPROTO_TCP);
- if (tcp_mask && tcp_mask->hdr.src_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_O_L4_FB_SRC_PORT,
- 1);
- if (tcp_mask && tcp_mask->hdr.dst_port)
- ULP_COMP_FLD_IDX_WR(params,
- BNXT_ULP_CF_IDX_O_L4_FB_DST_PORT,
- 1);
- }
+ ULP_BITMAP_ISSET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_O_TCP))
+ out_l4 = BNXT_ULP_HDR_BIT_I_TCP;
+
+ ulp_rte_l4_proto_type_update(params, sport, sport_mask, dport,
+ dport_mask, out_l4);
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L4_HDR_CNT, ++cnt);
return BNXT_TF_RC_SUCCESS;
}
/* Update the hdr_bitmap with vxlan */
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_VXLAN);
- ulp_rte_l4_proto_type_update(params, 0);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
return BNXT_TF_RC_SUCCESS;
}
/* Update the hdr_bitmap with GRE */
ULP_BITMAP_SET(hdr_bitmap->bits, BNXT_ULP_HDR_BIT_T_GRE);
- ulp_rte_l4_proto_type_update(params, 0);
+ ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_L3_TUN, 1);
return BNXT_TF_RC_SUCCESS;
}
act_count = action_item->conf;
if (act_count) {
- if (act_count->shared) {
- BNXT_TF_DBG(ERR,
- "Parse Error:Shared count not supported\n");
- return BNXT_TF_RC_PARSE_ERR;
- }
memcpy(&act_prop->act_details[BNXT_ULP_ACT_PROP_IDX_COUNT],
&act_count->id,
BNXT_ULP_ACT_PROP_SZ_COUNT);
/* Function to handle the parsing of action ports. */
static int32_t
ulp_rte_parser_act_port_set(struct ulp_rte_parser_params *param,
- uint32_t ifindex)
+ uint32_t ifindex,
+ enum bnxt_ulp_direction_type act_dir)
{
enum bnxt_ulp_direction_type dir;
uint16_t pid_s;
uint32_t vnic_type;
/* Get the direction */
- dir = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION);
- if (dir == BNXT_ULP_DIR_EGRESS) {
+ /* If action implicitly specifies direction, use the specification. */
+ dir = (act_dir == BNXT_ULP_DIR_INVALID) ?
+ ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_DIRECTION) :
+ act_dir;
+ port_type = ULP_COMP_FLD_IDX_RD(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
+ if (dir == BNXT_ULP_DIR_EGRESS &&
+ port_type != BNXT_ULP_INTF_TYPE_VF_REP) {
/* For egress direction, fill vport */
if (ulp_port_db_vport_get(param->ulp_ctx, ifindex, &pid_s))
return BNXT_TF_RC_ERROR;
&pid, BNXT_ULP_ACT_PROP_SZ_VPORT);
} else {
/* For ingress direction, fill vnic */
- port_type = ULP_COMP_FLD_IDX_RD(param,
- BNXT_ULP_CF_IDX_ACT_PORT_TYPE);
- if (port_type == BNXT_ULP_INTF_TYPE_VF_REP)
+ /*
+ * Action Destination
+ * ------------------------------------
+ * PORT_REPRESENTOR Driver Function
+ * ------------------------------------
+ * REPRESENTED_PORT VF
+ * ------------------------------------
+ * PORT_ID VF
+ */
+ if (act_dir != BNXT_ULP_DIR_INGRESS &&
+ port_type == BNXT_ULP_INTF_TYPE_VF_REP)
vnic_type = BNXT_ULP_VF_FUNC_VNIC;
else
vnic_type = BNXT_ULP_DRV_FUNC_VNIC;
}
/* Update the action properties */
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
- return ulp_rte_parser_act_port_set(params, ifindex);
+ return ulp_rte_parser_act_port_set(params, ifindex,
+ BNXT_ULP_DIR_INVALID);
}
/* Function to handle the parsing of RTE Flow action VF. */
/* Update the action properties */
ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
- return ulp_rte_parser_act_port_set(params, ifindex);
+ return ulp_rte_parser_act_port_set(params, ifindex,
+ BNXT_ULP_DIR_INVALID);
}
-/* Function to handle the parsing of RTE Flow action port_id. */
+/* Parse actions PORT_ID, PORT_REPRESENTOR and REPRESENTED_PORT. */
int32_t
-ulp_rte_port_id_act_handler(const struct rte_flow_action *act_item,
- struct ulp_rte_parser_params *param)
+ulp_rte_port_act_handler(const struct rte_flow_action *act_item,
+ struct ulp_rte_parser_params *param)
{
- const struct rte_flow_action_port_id *port_id = act_item->conf;
+ uint32_t ethdev_id;
uint32_t ifindex;
enum bnxt_ulp_intf_type intf_type;
+ enum bnxt_ulp_direction_type act_dir;
- if (!port_id) {
+ if (!act_item->conf) {
BNXT_TF_DBG(ERR,
"ParseErr: Invalid Argument\n");
return BNXT_TF_RC_PARSE_ERR;
}
- if (port_id->original) {
- BNXT_TF_DBG(ERR,
- "ParseErr:Portid Original not supported\n");
- return BNXT_TF_RC_PARSE_ERR;
+ switch (act_item->type) {
+ case RTE_FLOW_ACTION_TYPE_PORT_ID: {
+ const struct rte_flow_action_port_id *port_id = act_item->conf;
+
+ if (port_id->original) {
+ BNXT_TF_DBG(ERR,
+ "ParseErr:Portid Original not supported\n");
+ return BNXT_TF_RC_PARSE_ERR;
+ }
+ ethdev_id = port_id->id;
+ act_dir = BNXT_ULP_DIR_INVALID;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_PORT_REPRESENTOR: {
+ const struct rte_flow_action_ethdev *ethdev = act_item->conf;
+
+ ethdev_id = ethdev->port_id;
+ act_dir = BNXT_ULP_DIR_INGRESS;
+ break;
+ }
+ case RTE_FLOW_ACTION_TYPE_REPRESENTED_PORT: {
+ const struct rte_flow_action_ethdev *ethdev = act_item->conf;
+
+ ethdev_id = ethdev->port_id;
+ act_dir = BNXT_ULP_DIR_EGRESS;
+ break;
+ }
+ default:
+ BNXT_TF_DBG(ERR, "Unknown port action\n");
+ return BNXT_TF_RC_ERROR;
}
/* Get the port db ifindex */
- if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, port_id->id,
+ if (ulp_port_db_dev_port_to_ulp_index(param->ulp_ctx, ethdev_id,
&ifindex)) {
BNXT_TF_DBG(ERR, "Invalid port id\n");
return BNXT_TF_RC_ERROR;
/* Set the action port */
ULP_COMP_FLD_IDX_WR(param, BNXT_ULP_CF_IDX_ACT_PORT_TYPE, intf_type);
- return ulp_rte_parser_act_port_set(param, ifindex);
+ return ulp_rte_parser_act_port_set(param, ifindex, act_dir);
}
/* Function to handle the parsing of RTE Flow action phy_port. */