net/cxgbe: support flow API for matching all packets on PF
author     Karra Satwik <kaara.satwik@chelsio.com>
           Wed, 11 Mar 2020 09:05:45 +0000 (14:35 +0530)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Wed, 18 Mar 2020 14:29:39 +0000 (15:29 +0100)
Add support to match all packets received on the underlying PF.
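
As a hypothetical usage sketch (not part of this patch), an application
could request this match through the standard rte_flow API. The helper
name create_pf_catchall(), the port id and the queue index are
illustrative only; the RTE_FLOW_ITEM_TYPE_PF item itself carries no
spec or mask:

    #include <rte_flow.h>

    /* Steer every packet received on the underlying PF to Rx queue 0. */
    static struct rte_flow *create_pf_catchall(uint16_t port_id)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_PF },  /* no spec/mask needed */
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = 0 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error err;

            return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }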

Note that the same 17-bit hardware tuple is shared between the QinQ
and PF match. Hence, only one of QinQ or PF match can be active at a
time; both cannot be enabled simultaneously.
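
For reference, a minimal sketch of how the PF match occupies that
shared VNIC_ID slice of the tuple when F_VNIC is set in ingress_config,
mirroring the hash_filter_ntuple() change below (the valid bit sits at
bit 16 and the 3-bit PF number at bits 15:13, which is why PF_BITWIDTH
drops to 3):

    /* Shared 17-bit slice: bit 16 = valid, bits 15:13 = PF number. */
    ntuple |= (u64)((f->fs.val.pfvf_vld << 16) |
                    (f->fs.val.pf << 13)) << tp->vnic_shift;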

Also, remove the check that rejects rules without a spec, because
RTE_FLOW_ITEM_TYPE_PF doesn't require one. With that check removed,
the RTE_FLOW_ITEM_TYPE_PHY_PORT item must be updated to handle a
NULL spec.

Signed-off-by: Karra Satwik <kaara.satwik@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
drivers/net/cxgbe/cxgbe_filter.c
drivers/net/cxgbe/cxgbe_filter.h
drivers/net/cxgbe/cxgbe_flow.c

diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
index 193738f..4c50932 100644
--- a/drivers/net/cxgbe/cxgbe_filter.c
+++ b/drivers/net/cxgbe/cxgbe_filter.c
@@ -73,15 +73,17 @@ int cxgbe_validate_filter(struct adapter *adapter,
        if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
            U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
            U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) ||
-           U(F_TOS, tos))
+           U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld))
                return -EOPNOTSUPP;
 
-       /* Ensure OVLAN match is enabled in hardware */
-       if (S(ovlan_vld) && (iconf & F_VNIC))
+       /* Either OVLAN or PFVF match is enabled in hardware, but not both */
+       if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
+           (S(ovlan_vld) && (iconf & F_VNIC)))
                return -EOPNOTSUPP;
 
-       /* To use OVLAN, L4 encapsulation match must not be enabled */
-       if (S(ovlan_vld) && (iconf & F_USE_ENC_IDX))
+       /* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */
+       if ((S(ovlan_vld) && (iconf & F_USE_ENC_IDX)) ||
+           (S(pfvf_vld) && (iconf & F_USE_ENC_IDX)))
                return -EOPNOTSUPP;
 
 #undef S
@@ -308,8 +310,12 @@ static u64 hash_filter_ntuple(const struct filter_entry *f)
                ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
                          tp->vlan_shift;
        if (tp->vnic_shift >= 0) {
-               if (!(adap->params.tp.ingress_config & F_VNIC) &&
-                   f->fs.mask.ovlan_vld)
+               if ((adap->params.tp.ingress_config & F_VNIC) &&
+                   f->fs.mask.pfvf_vld)
+                       ntuple |= (u64)((f->fs.val.pfvf_vld << 16) |
+                                       (f->fs.val.pf << 13)) << tp->vnic_shift;
+               else if (!(adap->params.tp.ingress_config & F_VNIC) &&
+                        f->fs.mask.ovlan_vld)
                        ntuple |= (u64)(f->fs.val.ovlan_vld << 16 |
                                        f->fs.val.ovlan) << tp->vnic_shift;
        }
@@ -965,10 +971,11 @@ int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
 {
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
-       unsigned int fidx, iq;
+       u8 nentries, bitoff[16] = {0};
        struct filter_entry *f;
        unsigned int chip_ver;
-       u8 nentries, bitoff[16] = {0};
+       unsigned int fidx, iq;
+       u32 iconf;
        int ret;
 
        if (is_hashfilter(adapter) && fs->cap)
@@ -1052,6 +1059,20 @@ int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
        f->fs.iq = iq;
        f->dev = dev;
 
+       iconf = adapter->params.tp.ingress_config;
+
+       /* Either PFVF or OVLAN can be active, but not both
+        * So, if PFVF is enabled, then overwrite the OVLAN
+        * fields with PFVF fields before writing the spec
+        * to hardware.
+        */
+       if (iconf & F_VNIC) {
+               f->fs.val.ovlan = fs->val.pf << 13;
+               f->fs.mask.ovlan = fs->mask.pf << 13;
+               f->fs.val.ovlan_vld = fs->val.pfvf_vld;
+               f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+       }
+
        /*
         * Attempt to set the filter.  If we don't succeed, we clear
         * it and return the failure.
diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h
index 06021c8..2ac2100 100644
--- a/drivers/net/cxgbe/cxgbe_filter.h
+++ b/drivers/net/cxgbe/cxgbe_filter.h
@@ -18,7 +18,7 @@
 #define MATCHTYPE_BITWIDTH 3
 #define PROTO_BITWIDTH 8
 #define TOS_BITWIDTH 8
-#define PF_BITWIDTH 8
+#define PF_BITWIDTH 3
 #define VF_BITWIDTH 8
 #define IVLAN_BITWIDTH 16
 #define OVLAN_BITWIDTH 16
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index c860b78..138d075 100644
--- a/drivers/net/cxgbe/cxgbe_flow.c
+++ b/drivers/net/cxgbe/cxgbe_flow.c
@@ -154,9 +154,15 @@ cxgbe_fill_filter_region(struct adapter *adap,
        if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
                ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
                               tp->vlan_shift;
-       if (tp->vnic_shift >= 0 && fs->mask.ovlan_vld)
-               ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ovlan) <<
-                              tp->vnic_shift;
+       if (tp->vnic_shift >= 0) {
+               if (fs->mask.ovlan_vld)
+                       ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
+                                            fs->mask.ovlan) << tp->vnic_shift;
+               else if (fs->mask.pfvf_vld)
+                       ntuple_mask |= (u64)((fs->mask.pfvf_vld << 16) |
+                                            (fs->mask.pf << 13)) <<
+                                            tp->vnic_shift;
+       }
        if (tp->tos_shift >= 0)
                ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
 
@@ -221,6 +227,9 @@ ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
 
        mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
 
+       if (!val)
+               return 0; /* Wildcard, match all physical ports */
+
        if (val->index > 0x7)
                return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                          item,
@@ -291,6 +300,22 @@ ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
        return 0;
 }
 
+static int
+ch_rte_parsetype_pf(const void *dmask __rte_unused,
+                   const struct rte_flow_item *item __rte_unused,
+                   struct ch_filter_specification *fs,
+                   struct rte_flow_error *e __rte_unused)
+{
+       struct rte_flow *flow = (struct rte_flow *)fs->private;
+       struct rte_eth_dev *dev = flow->dev;
+       struct adapter *adap = ethdev2adap(dev);
+
+       CXGBE_FILL_FS(1, 1, pfvf_vld);
+
+       CXGBE_FILL_FS(adap->pf, 0x7, pf);
+       return 0;
+}
+
 static int
 ch_rte_parsetype_udp(const void *dmask, const struct rte_flow_item *item,
                     struct ch_filter_specification *fs,
@@ -918,6 +943,11 @@ static struct chrte_fparse parseitem[] = {
                .fptr  = ch_rte_parsetype_tcp,
                .dmask = &rte_flow_item_tcp_mask,
        },
+
+       [RTE_FLOW_ITEM_TYPE_PF] = {
+               .fptr = ch_rte_parsetype_pf,
+               .dmask = NULL,
+       },
 };
 
 static int
@@ -951,10 +981,6 @@ cxgbe_rtef_parse_items(struct rte_flow *flow,
 
                        repeat[i->type] = 1;
 
-                       /* No spec found for this pattern item. Skip it */
-                       if (!i->spec)
-                               break;
-
                        /* validate the item */
                        ret = cxgbe_validate_item(i, e);
                        if (ret)