if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
tp->vlan_shift;
- if (tp->vnic_shift >= 0 && fs->mask.ovlan_vld)
- ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ovlan) <<
- tp->vnic_shift;
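+ /* The VNIC field in the filter tuple carries either the outer VLAN
+ * or the PF/VF match, so build the mask from whichever one is valid.
+ */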
+ if (tp->vnic_shift >= 0) {
+ if (fs->mask.ovlan_vld)
+ ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
+ fs->mask.ovlan) << tp->vnic_shift;
+ else if (fs->mask.pfvf_vld)
+ ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 |
+ fs->mask.pf << 13 |
+ fs->mask.vf) << tp->vnic_shift;
+ }
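+ /* Fold the ToS mask into the ntuple when the filter tuple has a TOS field. */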
+ if (tp->tos_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
if (ntuple_mask != hash_filter_mask)
return;
mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
+ if (!val)
+ return 0; /* Wildcard, match all physical ports */
+
if (val->index > 0x7)
return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
item,
"port index up to 0x7 is supported");

CXGBE_FILL_FS(val->index, mask->index, iport);

return 0;
}
+static int
+ch_rte_parsetype_pf(const void *dmask __rte_unused,
+ const struct rte_flow_item *item __rte_unused,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e __rte_unused)
+{
+ struct rte_flow *flow = (struct rte_flow *)fs->private;
+ struct rte_eth_dev *dev = flow->dev;
+ struct adapter *adap = ethdev2adap(dev);
+
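+ /* The PF item carries no spec/mask; match on the PF number that
+ * the underlying port belongs to.
+ */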
+ CXGBE_FILL_FS(1, 1, pfvf_vld);
+
+ CXGBE_FILL_FS(adap->pf, 0x7, pf);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_vf *umask = item->mask;
+ const struct rte_flow_item_vf *val = item->spec;
+ const struct rte_flow_item_vf *mask;
+
+ /* If the user has not given any mask, use the Chelsio-supported mask. */
+ mask = umask ? umask : (const struct rte_flow_item_vf *)dmask;
+
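+ /* Request PF/VF matching in the VNIC field up front; without a
+ * spec this degrades to a wildcard match on any VF.
+ */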
+ CXGBE_FILL_FS(1, 1, pfvf_vld);
+
+ if (!val)
+ return 0; /* Wildcard, match all VFs */
+
+ if (val->id > UCHAR_MAX)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VF ID > MAX(255)");
+
+ CXGBE_FILL_FS(val->id, mask->id, vf);
+
+ return 0;
+}
+
static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
struct ch_filter_specification *fs,
mask = umask ? umask : (const struct rte_flow_item_ipv4 *)dmask;
- if (mask->hdr.time_to_live || mask->hdr.type_of_service)
+ if (mask->hdr.time_to_live)
return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
- item, "ttl/tos are not supported");
+ item, "ttl is not supported");
if (fs->mask.ethtype &&
(fs->val.ethtype != RTE_ETHER_TYPE_IPV4))
return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
item, "eth-type is not IPv4");

if (!val)
return 0; /* ipv4 wild card */

CXGBE_FILL_FS(val->hdr.next_proto_id, mask->hdr.next_proto_id, proto);
CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
+ CXGBE_FILL_FS(val->hdr.type_of_service, mask->hdr.type_of_service, tos);
return 0;
}
const struct rte_flow_item_ipv6 *val = item->spec;
const struct rte_flow_item_ipv6 *umask = item->mask;
const struct rte_flow_item_ipv6 *mask;
+ u32 vtc_flow, vtc_flow_mask;
mask = umask ? umask : (const struct rte_flow_item_ipv6 *)dmask;
- if (mask->hdr.vtc_flow ||
+ vtc_flow_mask = be32_to_cpu(mask->hdr.vtc_flow);
+
+ if (vtc_flow_mask & RTE_IPV6_HDR_FL_MASK ||
mask->hdr.payload_len || mask->hdr.hop_limits)
return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "tc/flow/hop are not supported");
+ "flow/hop are not supported");
if (fs->mask.ethtype &&
(fs->val.ethtype != RTE_ETHER_TYPE_IPV6))
return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
item, "eth-type is not IPv6");

if (!val)
return 0; /* ipv6 wild card */
CXGBE_FILL_FS(val->hdr.proto, mask->hdr.proto, proto);
+
+ vtc_flow = be32_to_cpu(val->hdr.vtc_flow);
+ CXGBE_FILL_FS((vtc_flow & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT,
+ (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT,
+ tos);
+
CXGBE_FILL_FS_MEMCPY(val->hdr.dst_addr, mask->hdr.dst_addr, lip);
CXGBE_FILL_FS_MEMCPY(val->hdr.src_addr, mask->hdr.src_addr, fip);
const struct rte_flow_action_set_ipv6 *ipv6;
const struct rte_flow_action_set_tp *tp_port;
const struct rte_flow_action_phy_port *port;
+ const struct rte_flow_action_set_mac *mac;
int item_index;
u16 tmp_vlan;
"found");
fs->swapmac = 1;
break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ item_index = cxgbe_get_flow_item_index(items,
+ RTE_FLOW_ITEM_TYPE_ETH);
+ if (item_index < 0)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, a,
+ "No RTE_FLOW_ITEM_TYPE_ETH found");
+ mac = (const struct rte_flow_action_set_mac *)a->conf;
+
+ fs->newdmac = 1;
+ memcpy(fs->dmac, mac->mac_addr, sizeof(fs->dmac));
+ break;
default:
/* We are not supposed to come here */
return rte_flow_error_set(e, EINVAL,
goto action_switch;
case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
action_switch:
/* We allow multiple switch actions, but switch is
* not compatible with either queue or drop
[RTE_FLOW_ITEM_TYPE_IPV4] = {
.fptr = ch_rte_parsetype_ipv4,
- .dmask = &rte_flow_item_ipv4_mask,
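+ /* The generic rte_flow_item_ipv4_mask covers only the addresses,
+ * so supply a default mask that also covers type_of_service.
+ */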
+ .dmask = &(const struct rte_flow_item_ipv4) {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ },
+ },
},
[RTE_FLOW_ITEM_TYPE_IPV6] = {
.fptr = ch_rte_parsetype_ipv6,
- .dmask = &rte_flow_item_ipv6_mask,
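+ /* Likewise, extend the IPv6 default mask so the traffic-class
+ * bits of vtc_flow are covered by default.
+ */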
+ .dmask = &(const struct rte_flow_item_ipv6) {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xff000000),
+ },
+ },
},
[RTE_FLOW_ITEM_TYPE_UDP] = {
.fptr = ch_rte_parsetype_udp,
.dmask = &rte_flow_item_udp_mask,
},
[RTE_FLOW_ITEM_TYPE_TCP] = {
.fptr = ch_rte_parsetype_tcp,
.dmask = &rte_flow_item_tcp_mask,
},
+
+ [RTE_FLOW_ITEM_TYPE_PF] = {
+ .fptr = ch_rte_parsetype_pf,
+ .dmask = NULL,
+ },
+
+ [RTE_FLOW_ITEM_TYPE_VF] = {
+ .fptr = ch_rte_parsetype_vf,
+ .dmask = &(const struct rte_flow_item_vf){
+ .id = 0xffffffff,
+ }
+ },
};
static int
repeat[i->type] = 1;
- /* No spec found for this pattern item. Skip it */
- if (!i->spec)
- break;
-
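+ /* Items without a spec now reach their parsers, which treat a
+ * missing spec as a wildcard (see the PF/VF and phy_port parsers).
+ */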
/* validate the item */
ret = cxgbe_validate_item(i, e);
if (ret)