otx2_flow_parse_ld(struct otx2_parse_state *pst)
{
char hw_mask[NPC_MAX_EXTRACT_DATA_LEN];
+ uint32_t gre_key_mask = 0xffffffff;
struct otx2_flow_item_info info;
int lid, lt, lflags;
int rc;
info.def_mask = &rte_flow_item_gre_mask;
info.len = sizeof(struct rte_flow_item_gre);
break;
- case RTE_FLOW_ITEM_TYPE_NVGRE:
+ case RTE_FLOW_ITEM_TYPE_GRE_KEY:
lt = NPC_LT_LD_GRE;
+ info.def_mask = &gre_key_mask;
+ info.len = sizeof(gre_key_mask);
+ info.hw_hdr_len = 4;
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ lt = NPC_LT_LD_NVGRE;
lflags = NPC_F_GRE_NVGRE;
info.def_mask = &rte_flow_item_nvgre_mask;
info.len = sizeof(struct rte_flow_item_nvgre);
info.def_mask = &rte_flow_item_arp_eth_ipv4_mask;
info.len = sizeof(struct rte_flow_item_arp_eth_ipv4);
break;
+ case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
+ lid = NPC_LID_LC;
+ lt = NPC_LT_LC_IP6_EXT;
+ info.def_mask = &rte_flow_item_ipv6_ext_mask;
+ info.len = sizeof(struct rte_flow_item_ipv6_ext);
+ info.hw_hdr_len = 40;
+ break;
default:
/* No match at this layer */
return 0;
lt = NPC_LT_LB_CTAG;
break;
case 2:
- lt = NPC_LT_LB_STAG;
+ lt = NPC_LT_LB_STAG_QINQ;
lflags = NPC_F_STAG_CTAG;
break;
case 3:
- lt = NPC_LT_LB_STAG;
+ lt = NPC_LT_LB_STAG_QINQ;
lflags = NPC_F_STAG_STAG_CTAG;
break;
default:
if (pst->flow->nix_intf == NIX_INTF_TX) {
lt = NPC_LT_LA_IH_NIX_ETHER;
info.hw_hdr_len = NPC_IH_LENGTH;
+ if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
+ lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
+ info.hw_hdr_len += NPC_HIGIG2_LENGTH;
+ }
+ } else {
+ if (pst->npc->switch_header_type == OTX2_PRIV_FLAGS_HIGIG) {
+ lt = NPC_LT_LA_HIGIG2_ETHER;
+ info.hw_hdr_len = NPC_HIGIG2_LENGTH;
+ }
}
/* Prepare for parsing the item */
return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
}
+/*
+ * Parse a HIGIG2 header flow pattern item into the NPC parse state.
+ *
+ * Matches only RTE_FLOW_ITEM_TYPE_HIGIG2; any other item type is not
+ * handled at this layer and the function returns 0 without touching
+ * the parse state. On a match, selects layer A (LA) layer types and
+ * validates the item against the hardware-supported mask.
+ *
+ * Returns 0 on success or no-match; otherwise the error code from
+ * otx2_flow_parse_item_basic() (rte_flow error is filled in pst->error).
+ */
+int
+otx2_flow_parse_higig2_hdr(struct otx2_parse_state *pst)
+{
+ struct rte_flow_item_higig2_hdr hw_mask;
+ struct otx2_flow_item_info info;
+ int lid, lt;
+ int rc;
+
+ /* Identify the pattern type into lid, lt */
+ if (pst->pattern->type != RTE_FLOW_ITEM_TYPE_HIGIG2)
+ return 0;
+
+ lid = NPC_LID_LA;
+ lt = NPC_LT_LA_HIGIG2_ETHER;
+ info.hw_hdr_len = 0;
+
+ /* TX path: the NIX instruction header (IH) precedes the packet,
+ * so switch to the IH+HIGIG2 layer type and account for the IH
+ * bytes in the hardware header length.
+ */
+ if (pst->flow->nix_intf == NIX_INTF_TX) {
+ lt = NPC_LT_LA_IH_NIX_HIGIG2_ETHER;
+ info.hw_hdr_len = NPC_IH_LENGTH;
+ }
+
+ /* Prepare for parsing the item */
+ info.def_mask = &rte_flow_item_higig2_hdr_mask;
+ info.hw_mask = &hw_mask;
+ info.len = sizeof(struct rte_flow_item_higig2_hdr);
+ otx2_flow_get_hw_supp_mask(pst, &info, lid, lt);
+ /* spec/mask are filled in from the pattern item by the basic
+ * item parser below; start from a clean slate.
+ */
+ info.spec = NULL;
+ info.mask = NULL;
+
+ /* Basic validation of item parameters */
+ rc = otx2_flow_parse_item_basic(pst->pattern, &info, pst->error);
+ if (rc)
+ return rc;
+
+ /* Fold this layer's spec/mask into the MCAM key being built.
+ * NOTE(review): presumably this also performs the layer-clash
+ * check and runs even in validate-only mode — the original
+ * author left this open ("clash check?"); confirm against
+ * otx2_flow_update_parse_state() before relying on it.
+ */
+ return otx2_flow_update_parse_state(pst, &info, lid, lt, 0);
+}
+
static int
parse_rss_action(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_action_count *act_count;
const struct rte_flow_action_mark *act_mark;
const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_vf *vf_act;
const char *errmsg = NULL;
int sel_act, req_act = 0;
- uint16_t pf_func;
+ uint16_t pf_func, vf_id;
int errcode = 0;
int mark = 0;
int rq = 0;
/* Initialize actions */
flow->ctr_id = NPC_COUNTER_NONE;
+ pf_func = otx2_pfvf_func(hw->pf, hw->vf);
for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
otx2_npc_dbg("Action type = %d", actions->type);
req_act |= OTX2_FLOW_ACT_DROP;
break;
+ case RTE_FLOW_ACTION_TYPE_PF:
+ req_act |= OTX2_FLOW_ACT_PF;
+ pf_func &= (0xfc00);
+ break;
+
+ case RTE_FLOW_ACTION_TYPE_VF:
+ vf_act = (const struct rte_flow_action_vf *)
+ actions->conf;
+ req_act |= OTX2_FLOW_ACT_VF;
+ if (vf_act->original == 0) {
+ vf_id = vf_act->id & RVU_PFVF_FUNC_MASK;
+ if (vf_id >= hw->maxvf) {
+ errmsg = "invalid vf specified";
+ errcode = EINVAL;
+ goto err_exit;
+ }
+ pf_func &= (0xfc00);
+ pf_func = (pf_func | (vf_id + 1));
+ }
+ break;
+
case RTE_FLOW_ACTION_TYPE_QUEUE:
/* Applicable only to ingress flow */
act_q = (const struct rte_flow_action_queue *)
}
/* Set NIX_RX_ACTIONOP */
- if (req_act & OTX2_FLOW_ACT_DROP) {
+ if (req_act & (OTX2_FLOW_ACT_PF | OTX2_FLOW_ACT_VF)) {
+ flow->npc_action = NIX_RX_ACTIONOP_UCAST;
+ if (req_act & OTX2_FLOW_ACT_QUEUE)
+ flow->npc_action |= (uint64_t)rq << 20;
+ } else if (req_act & OTX2_FLOW_ACT_DROP) {
flow->npc_action = NIX_RX_ACTIONOP_DROP;
} else if (req_act & OTX2_FLOW_ACT_QUEUE) {
flow->npc_action = NIX_RX_ACTIONOP_UCAST;
if (mark)
flow->npc_action |= (uint64_t)mark << 40;
- if (rte_atomic32_read(&npc->mark_actions) == 1)
+ if (rte_atomic32_read(&npc->mark_actions) == 1) {
hw->rx_offload_flags |=
NIX_RX_OFFLOAD_MARK_UPDATE_F;
+ otx2_eth_set_rx_function(dev);
+ }
set_pf_func:
/* Ideally AF must ensure that correct pf_func is set */
- pf_func = otx2_pfvf_func(hw->pf, hw->vf);
flow->npc_action |= (uint64_t)pf_func << 4;
return 0;