X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcxgbe%2Fcxgbe_flow.c;h=d731fdc1a0c8f8a6c878642b19a476525f2f92c8;hb=7d71ba5d15ce4db117a02f4a79de7c9d22a5fdba;hp=cd833d095a0e42e1dcf22ef90e96a392f9a5f17d;hpb=55f003d8884c3a05937ef7ef113f698870d71a83;p=dpdk.git

diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index cd833d095a..d731fdc1a0 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -154,9 +154,17 @@ cxgbe_fill_filter_region(struct adapter *adap,
 	if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
 		ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
 			       tp->vlan_shift;
-	if (tp->vnic_shift >= 0 && fs->mask.ovlan_vld)
-		ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ovlan) <<
-			       tp->vnic_shift;
+	if (tp->vnic_shift >= 0) {
+		if (fs->mask.ovlan_vld)
+			ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
+					     fs->mask.ovlan) << tp->vnic_shift;
+		else if (fs->mask.pfvf_vld)
+			ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 |
+					     fs->mask.pf << 13 |
+					     fs->mask.vf) << tp->vnic_shift;
+	}
+	if (tp->tos_shift >= 0)
+		ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
 
 	if (ntuple_mask != hash_filter_mask)
 		return;
@@ -219,6 +227,9 @@ ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
 	mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
 
+	if (!val)
+		return 0; /* Wildcard, match all physical ports */
+
 	if (val->index > 0x7)
 		return rte_flow_error_set(e, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
@@ -289,6 +300,50 @@ ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
 	return 0;
 }
 
+static int
+ch_rte_parsetype_pf(const void *dmask __rte_unused,
+		    const struct rte_flow_item *item __rte_unused,
+		    struct ch_filter_specification *fs,
+		    struct rte_flow_error *e __rte_unused)
+{
+	struct rte_flow *flow = (struct rte_flow *)fs->private;
+	struct rte_eth_dev *dev = flow->dev;
+	struct adapter *adap = ethdev2adap(dev);
+
+	CXGBE_FILL_FS(1, 1, pfvf_vld);
+
+	CXGBE_FILL_FS(adap->pf, 0x7, pf);
+	return 0;
+}
+
+static int
+ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item,
+		    struct ch_filter_specification *fs,
+		    struct rte_flow_error *e)
+{
+	const struct rte_flow_item_vf *umask = item->mask;
+	const struct rte_flow_item_vf *val = item->spec;
+	const struct rte_flow_item_vf *mask;
+
+	/* If user has not given any mask, then use chelsio supported mask. */
+	mask = umask ? umask : (const struct rte_flow_item_vf *)dmask;
+
+	CXGBE_FILL_FS(1, 1, pfvf_vld);
+
+	if (!val)
+		return 0; /* Wildcard, match all Vf */
+
+	if (val->id > UCHAR_MAX)
+		return rte_flow_error_set(e, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  item,
+					  "VF ID > MAX(255)");
+
+	CXGBE_FILL_FS(val->id, mask->id, vf);
+
+	return 0;
+}
+
 static int
 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
 		     struct ch_filter_specification *fs,
@@ -354,9 +409,9 @@ ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
 
 	mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
 
-	if (mask->hdr.time_to_live || mask->hdr.type_of_service)
+	if (mask->hdr.time_to_live)
 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
-					  item, "ttl/tos are not supported");
+					  item, "ttl is not supported");
 
 	if (fs->mask.ethtype &&
 	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
@@ -370,6 +425,7 @@ ch_rte_parsetype_ipv4(const void *dmask, const struct rte_flow_item *item,
 	CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
 	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
 	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+	CXGBE_FILL_FS(val->hdr.type_of_service, mask->hdr.type_of_service, tos);
 
 	return 0;
 }
@@ -382,14 +438,17 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
 	const struct rte_flow_item_ipv6 *val = item->spec;
 	const struct rte_flow_item_ipv6 *umask = item->mask;
 	const struct rte_flow_item_ipv6 *mask;
+	u32 vtc_flow, vtc_flow_mask;
 
 	mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
 
-	if (mask->hdr.vtc_flow ||
+	vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);
+
+	if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
 	    mask->hdr.payload_len || mask->hdr.hop_limits)
 		return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
 					  item,
-					  "tc/flow/hop are not supported");
+					  "flow/hop are not supported");
 
 	if (fs->mask.ethtype &&
 	    (fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
@@ -401,6 +460,14 @@ ch_rte_parsetype_ipv6(const void *dmask, const struct rte_flow_item *item,
 		return 0; /* ipv6 wild card */
 
 	CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
+
+	vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
+	CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
+		      RTE_IPV6_HDR_TC_SHIFT,
+		      (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
+		      RTE_IPV6_HDR_TC_SHIFT,
+		      tos);
+
 	CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
 	CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
 
@@ -580,6 +647,7 @@ ch_rte_parse_atype_switch(const struct rte_flow_action *a,
 	const struct rte_flow_action_set_ipv6 *ipv6;
 	const struct rte_flow_action_set_tp *tp_port;
 	const struct rte_flow_action_phy_port *port;
+	const struct rte_flow_action_set_mac *mac;
 	int item_index;
 	u16 tmp_vlan;
 
@@ -727,6 +795,18 @@ ch_rte_parse_atype_switch(const struct rte_flow_action *a,
 					  "found");
 		fs->swapmac = 1;
 		break;
+	case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+		item_index = cxgbe_get_flow_item_index(items,
+						       RTE_FLOW_ITEM_TYPE_ETH);
+		if (item_index < 0)
+			return rte_flow_error_set(e, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ACTION, a,
+						  "No RTE_FLOW_ITEM_TYPE_ETH found");
+		mac = (const struct rte_flow_action_set_mac *)a->conf;
+
+		fs->newdmac = 1;
+		memcpy(fs->dmac, mac->mac_addr, sizeof(fs->dmac));
+		break;
 	default:
 		/* We are not supposed to come here */
 		return rte_flow_error_set(e, EINVAL,
@@ -803,6 +883,7 @@ cxgbe_rtef_parse_actions(struct rte_flow *flow,
 			goto action_switch;
 		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
 		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
action_switch:
 			/* We allow multiple switch actions, but switch is
 			 * not compatible with either queue or drop
@@ -871,12 +952,28 @@ static struct chrte_fparse parseitem[] = {
 
 	[RTE_FLOW_ITEM_TYPE_IPV4] = {
 		.fptr = ch_rte_parsetype_ipv4,
-		.dmask = &rte_flow_item_ipv4_mask,
+		.dmask = &(const struct rte_flow_item_ipv4) {
+			.hdr = {
+				.src_addr = RTE_BE32(0xffffffff),
+				.dst_addr = RTE_BE32(0xffffffff),
+				.type_of_service = 0xff,
+			},
+		},
 	},
 
 	[RTE_FLOW_ITEM_TYPE_IPV6] = {
 		.fptr = ch_rte_parsetype_ipv6,
-		.dmask = &rte_flow_item_ipv6_mask,
+		.dmask = &(const struct rte_flow_item_ipv6) {
+			.hdr = {
+				.src_addr =
+					"\xff\xff\xff\xff\xff\xff\xff\xff"
+					"\xff\xff\xff\xff\xff\xff\xff\xff",
+				.dst_addr =
+					"\xff\xff\xff\xff\xff\xff\xff\xff"
+					"\xff\xff\xff\xff\xff\xff\xff\xff",
+				.vtc_flow = RTE_BE32(0xff000000),
+			},
+		},
 	},
 
 	[RTE_FLOW_ITEM_TYPE_UDP] = {
@@ -888,6 +985,18 @@ static struct chrte_fparse parseitem[] = {
 		.fptr = ch_rte_parsetype_tcp,
 		.dmask = &rte_flow_item_tcp_mask,
 	},
+
+	[RTE_FLOW_ITEM_TYPE_PF] = {
+		.fptr = ch_rte_parsetype_pf,
+		.dmask = NULL,
+	},
+
+	[RTE_FLOW_ITEM_TYPE_VF] = {
+		.fptr = ch_rte_parsetype_vf,
+		.dmask = &(const struct rte_flow_item_vf){
+			.id = 0xffffffff,
+		}
+	},
 };
 
 static int
@@ -921,10 +1030,6 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
 
 		repeat[i->type] = 1;
 
-		/* No spec found for this pattern item. Skip it */
-		if (!i->spec)
-			break;
-
 		/* validate the item */
 		ret = cxgbe_validate_item(i, e);
 		if (ret)
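
Usage sketch (not part of the patch): the hunks above add cxgbe support for matching on the PF/VF pattern items and on the IPv4 ToS / IPv6 traffic-class field, and accept the SET_MAC_DST switch action. The snippet below is a minimal, hypothetical illustration of how an application might exercise those capabilities through the generic rte_flow API; the port id, VF id, ToS value, destination MAC and the PHY_PORT redirect index are placeholder values, and acceptance of this exact item/action combination still depends on the adapter's filter configuration. The ETH item is kept in the pattern because the SET_MAC_DST handling above looks up RTE_FLOW_ITEM_TYPE_ETH before accepting the action.

/* Hypothetical example, not part of the patch: match ingress traffic for
 * VF 1 carrying IPv4 packets with ToS 0x20, rewrite the destination MAC
 * and switch the packets out physical port 0.
 */
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
example_vf_tos_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };

	/* Placeholder match values. */
	struct rte_flow_item_vf vf_spec = { .id = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = { .type_of_service = 0x20 },
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = { .type_of_service = 0xff },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_VF, .spec = &vf_spec },
		/* ETH item is required by the SET_MAC_DST handling above. */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	/* Placeholder new destination MAC and egress physical port. */
	struct rte_flow_action_set_mac dmac = {
		.mac_addr = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	struct rte_flow_action_phy_port out_port = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST, .conf = &dmac },
		{ .type = RTE_FLOW_ACTION_TYPE_PHY_PORT, .conf = &out_port },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validate first, then offload the rule to the adapter. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}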