if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) ||
- U(F_TOS, tos))
+ U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld))
return -EOPNOTSUPP;
- /* Ensure OVLAN match is enabled in hardware */
- if (S(ovlan_vld) && (iconf & F_VNIC))
+ /* Hardware matches on either OVLAN or PFVF, never both; reject the
+  * filter if it requests the match type that is not the one enabled
+  * in the ingress configuration.
+  */
+ if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
+ (S(ovlan_vld) && (iconf & F_VNIC)))
return -EOPNOTSUPP;
- /* To use OVLAN, L4 encapsulation match must not be enabled */
- if (S(ovlan_vld) && (iconf & F_USE_ENC_IDX))
+ /* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */
+ if ((S(ovlan_vld) || S(pfvf_vld)) && (iconf & F_USE_ENC_IDX))
 	return -EOPNOTSUPP;
#undef S
ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
tp->vlan_shift;
if (tp->vnic_shift >= 0) {
- if (!(adap->params.tp.ingress_config & F_VNIC) &&
- f->fs.mask.ovlan_vld)
+ if ((adap->params.tp.ingress_config & F_VNIC) &&
+ f->fs.mask.pfvf_vld)
+ ntuple |= (u64)((f->fs.val.pfvf_vld << 16) |
+ (f->fs.val.pf << 13)) << tp->vnic_shift;
+ else if (!(adap->params.tp.ingress_config & F_VNIC) &&
+ f->fs.mask.ovlan_vld)
ntuple |= (u64)(f->fs.val.ovlan_vld << 16 |
f->fs.val.ovlan) << tp->vnic_shift;
}
{
struct port_info *pi = ethdev2pinfo(dev);
struct adapter *adapter = pi->adapter;
- unsigned int fidx, iq;
+ u8 nentries, bitoff[16] = {0};
struct filter_entry *f;
unsigned int chip_ver;
- u8 nentries, bitoff[16] = {0};
+ unsigned int fidx, iq;
+ u32 iconf;
int ret;
if (is_hashfilter(adapter) && fs->cap)
f->fs.iq = iq;
f->dev = dev;
+ iconf = adapter->params.tp.ingress_config;
+
+ /* Either PFVF or OVLAN can be active, but not both
+ * So, if PFVF is enabled, then overwrite the OVLAN
+ * fields with PFVF fields before writing the spec
+ * to hardware.
+ */
+ if (iconf & F_VNIC) {
+ f->fs.val.ovlan = fs->val.pf << 13;
+ f->fs.mask.ovlan = fs->mask.pf << 13;
+ f->fs.val.ovlan_vld = fs->val.pfvf_vld;
+ f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+ }
+
/*
* Attempt to set the filter. If we don't succeed, we clear
* it and return the failure.
if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
tp->vlan_shift;
- if (tp->vnic_shift >= 0 && fs->mask.ovlan_vld)
- 	ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ovlan) <<
- 		       tp->vnic_shift;
+ if (tp->vnic_shift >= 0) {
+ 	/* PFVF and OVLAN share the VNIC_ID tuple field; the F_VNIC
+ 	 * bit in the ingress config selects which one the hardware
+ 	 * matches, so gate on it here exactly as the filter-write
+ 	 * path does. Use mask fields only when building the mask:
+ 	 * folding fs->val.ovlan_vld into ntuple_mask was a bug.
+ 	 */
+ 	if ((adap->params.tp.ingress_config & F_VNIC) &&
+ 	    fs->mask.pfvf_vld)
+ 		ntuple_mask |= (u64)((fs->mask.pfvf_vld << 16) |
+ 				     (fs->mask.pf << 13)) <<
+ 				     tp->vnic_shift;
+ 	else if (fs->mask.ovlan_vld)
+ 		ntuple_mask |= (u64)((fs->mask.ovlan_vld << 16) |
+ 				     fs->mask.ovlan) << tp->vnic_shift;
+ }
if (tp->tos_shift >= 0)
ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
+ if (!val)
+ return 0; /* Wildcard, match all physical ports */
+
if (val->index > 0x7)
return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
item,
return 0;
}
+/* Parse the rte_flow PF item: match ingress packets destined to the
+ * physical function that owns this port. The PF item carries no
+ * spec/mask (all parameters are unused), so the match is fixed:
+ * pfvf_vld is set and the adapter's own PF index is matched with a
+ * full 3-bit mask.
+ */
+static int
+ch_rte_parsetype_pf(const void *dmask __rte_unused,
+		   const struct rte_flow_item *item __rte_unused,
+		   struct ch_filter_specification *fs,
+		   struct rte_flow_error *e __rte_unused)
+{
+	struct rte_flow *flow = (struct rte_flow *)fs->private;
+	struct rte_eth_dev *dev = flow->dev;
+	struct adapter *adap = ethdev2adap(dev);
+
+	/* Request PFVF-based VNIC_ID matching in the filter spec. */
+	CXGBE_FILL_FS(1, 1, pfvf_vld);
+
+	/* Match this adapter's PF index; 0x7 covers the 3-bit PF field. */
+	CXGBE_FILL_FS(adap->pf, 0x7, pf);
+	return 0;
+}
+
static int
ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
struct ch_filter_specification *fs,
.fptr = ch_rte_parsetype_tcp,
.dmask = &rte_flow_item_tcp_mask,
},
+
+ [RTE_FLOW_ITEM_TYPE_PF] = {
+ .fptr = ch_rte_parsetype_pf,
+ .dmask = NULL,
+ },
};
static int
repeat[i->type] = 1;
- /* No spec found for this pattern item. Skip it */
- if (!i->spec)
- break;
-
/* validate the item */
ret = cxgbe_validate_item(i, e);
if (ret)