+/**
+ * Apart from the 4-tuple IPv4/IPv6 - TCP/UDP information,
+ * there are only 40 bits available to store match fields.
+ * So, to save space, optimize the filter spec for some common
+ * known fields that hardware can parse against incoming
+ * packets automatically. See the illustrative sketch after
+ * this function.
+ */
+static void
+cxgbe_tweak_filter_spec(struct adapter *adap,
+ struct ch_filter_specification *fs)
+{
+ /* Save the 16-bit ethertype field space by setting the
+ * corresponding 1-bit flags in the filter spec for common
+ * known ethertypes. When hardware sees these flags, it
+ * automatically infers and matches incoming packets against
+ * the corresponding ethertype.
+ */
+ if (fs->mask.ethtype == 0xffff) {
+ switch (fs->val.ethtype) {
+ case RTE_ETHER_TYPE_IPV4:
+ if (adap->params.tp.ethertype_shift < 0) {
+ fs->type = FILTER_TYPE_IPV4;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ break;
+ case RTE_ETHER_TYPE_IPV6:
+ if (adap->params.tp.ethertype_shift < 0) {
+ fs->type = FILTER_TYPE_IPV6;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ break;
+ case RTE_ETHER_TYPE_VLAN:
+ if (adap->params.tp.ethertype_shift < 0 &&
+ adap->params.tp.vlan_shift >= 0) {
+ fs->val.ivlan_vld = 1;
+ fs->mask.ivlan_vld = 1;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ break;
+ case RTE_ETHER_TYPE_QINQ:
+ if (adap->params.tp.ethertype_shift < 0 &&
+ adap->params.tp.vnic_shift >= 0) {
+ fs->val.ovlan_vld = 1;
+ fs->mask.ovlan_vld = 1;
+ fs->val.ethtype = 0;
+ fs->mask.ethtype = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+}
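+
+/*
+ * Illustrative sketch (not part of this patch): on hardware whose
+ * compressed filter tuple lacks the ethertype field
+ * (ethertype_shift < 0), an exact IPv4 ethertype match is folded
+ * into the 1-bit 'type' flag by the function above:
+ *
+ * fs.val.ethtype = RTE_ETHER_TYPE_IPV4; fs.mask.ethtype = 0xffff;
+ * cxgbe_tweak_filter_spec(adap, &fs);
+ * // now fs.type == FILTER_TYPE_IPV4 and fs.val.ethtype ==
+ * // fs.mask.ethtype == 0, freeing 16 bits of match field space.
+ */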
+
+static void
+cxgbe_fill_filter_region(struct adapter *adap,
+ struct ch_filter_specification *fs)
+{
+ struct tp_params *tp = &adap->params.tp;
+ u64 hash_filter_mask = tp->hash_filter_mask;
+ u64 ntuple_mask = 0;
+
+ fs->cap = 0;
+
+ if (!is_hashfilter(adap))
+ return;
+
+ if (fs->type) {
+ uint8_t biton[16] = {0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff};
+ uint8_t bitoff[16] = {0};
+
+ if (!memcmp(fs->val.lip, bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, biton, sizeof(biton)))
+ return;
+ } else {
+ uint32_t biton = 0xffffffff;
+ uint32_t bitoff = 0x0U;
+
+ if (!memcmp(fs->val.lip, &bitoff, sizeof(bitoff)) ||
+ !memcmp(fs->val.fip, &bitoff, sizeof(bitoff)) ||
+ memcmp(fs->mask.lip, &biton, sizeof(biton)) ||
+ memcmp(fs->mask.fip, &biton, sizeof(biton)))
+ return;
+ }
+
+ if (!fs->val.lport || fs->mask.lport != 0xffff)
+ return;
+ if (!fs->val.fport || fs->mask.fport != 0xffff)
+ return;
+
+ if (tp->protocol_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
+ if (tp->ethertype_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
+ if (tp->port_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
+ if (tp->macmatch_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
+ if (tp->vlan_shift >= 0 && fs->mask.ivlan_vld)
+ ntuple_mask |= (u64)(F_FT_VLAN_VLD | fs->mask.ivlan) <<
+ tp->vlan_shift;
+ if (tp->vnic_shift >= 0) {
+ if (fs->mask.ovlan_vld)
+ ntuple_mask |= (u64)(fs->val.ovlan_vld << 16 |
+ fs->mask.ovlan) << tp->vnic_shift;
+ else if (fs->mask.pfvf_vld)
+ ntuple_mask |= (u64)(fs->mask.pfvf_vld << 16 |
+ fs->mask.pf << 13 |
+ fs->mask.vf) << tp->vnic_shift;
+ }
+ if (tp->tos_shift >= 0)
+ ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
+
+ if (ntuple_mask != hash_filter_mask)
+ return;
+
+ fs->cap = 1; /* use hash region */
+}
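+
+/*
+ * Worked example (hypothetical shift values, for illustration only):
+ * if protocol_shift == 0, port_shift == 8, and every other shift is
+ * disabled (< 0), then a fully-masked filter with fs->mask.proto ==
+ * 0xff and fs->mask.iport == 0x7 assembles:
+ *
+ * ntuple_mask = (0xff << 0) | ((u64)0x7 << 8) = 0x7ff
+ *
+ * The filter is steered to the hash region (fs->cap = 1) only when
+ * this exactly equals tp->hash_filter_mask programmed in hardware;
+ * otherwise it stays in the LE-TCAM region.
+ */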
+
+static int
+ch_rte_parsetype_eth(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_eth *spec = item->spec;
+ const struct rte_flow_item_eth *umask = item->mask;
+ const struct rte_flow_item_eth *mask;
+
+ /* If user has not given any mask, then use Chelsio supported mask. */
+ mask = umask ? umask : (const struct rte_flow_item_eth *)dmask;
+
+ if (!spec)
+ return 0;
+
+ /* We don't support SRC_MAC filtering. */
+ if (!rte_is_zero_ether_addr(&mask->src))
+ return rte_flow_error_set(e, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "src mac filtering not supported");
+
+ if (!rte_is_zero_ether_addr(&mask->dst)) {
+ const u8 *addr = (const u8 *)&spec->dst.addr_bytes[0];
+ const u8 *m = (const u8 *)&mask->dst.addr_bytes[0];
+ struct rte_flow *flow = (struct rte_flow *)fs->private;
+ struct port_info *pi = (struct port_info *)
+ (flow->dev->data->dev_private);
+ int idx;
+
+ idx = cxgbe_mpstcam_alloc(pi, addr, m);
+ if (idx <= 0)
+ return rte_flow_error_set(e, idx,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "unable to allocate mac"
+ " entry in h/w");
+ CXGBE_FILL_FS(idx, 0x1ff, macidx);
+ }
+
+ CXGBE_FILL_FS(be16_to_cpu(spec->type),
+ be16_to_cpu(mask->type), ethtype);
+
+ return 0;
+}
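+
+/*
+ * Example rule reaching this parser (testpmd syntax, illustrative):
+ *
+ * flow create 0 ingress pattern eth dst is 00:07:43:00:00:01 / end
+ *      actions drop / end
+ *
+ * The destination MAC is claimed in the MPS TCAM via
+ * cxgbe_mpstcam_alloc() and matched through the returned 'macidx';
+ * rules with a source MAC mask are rejected with ENOTSUP.
+ */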
+
+static int
+ch_rte_parsetype_port(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_phy_port *val = item->spec;
+ const struct rte_flow_item_phy_port *umask = item->mask;
+ const struct rte_flow_item_phy_port *mask;
+
+ mask = umask ? umask : (const struct rte_flow_item_phy_port *)dmask;
+
+ if (!val)
+ return 0; /* Wildcard, match all physical ports */
+
+ if (val->index > 0x7)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "port index upto 0x7 is supported");
+
+ CXGBE_FILL_FS(val->index, mask->index, iport);
+
+ return 0;
+}
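+
+/*
+ * Example (illustrative): restrict a rule to ingress physical port 1
+ * (testpmd syntax):
+ *
+ * flow create 0 ingress pattern phy_port index is 1 / end
+ *      actions queue index 0 / end
+ */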
+
+static int
+ch_rte_parsetype_vlan(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_vlan *spec = item->spec;
+ const struct rte_flow_item_vlan *umask = item->mask;
+ const struct rte_flow_item_vlan *mask;
+
+ /* If user has not given any mask, then use Chelsio supported mask. */
+ mask = umask ? umask : (const struct rte_flow_item_vlan *)dmask;
+
+ if (!fs->mask.ethtype)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Can't parse VLAN item without knowing ethertype");
+
+ /* The ethertype matched by the preceding eth item must be VLAN
+ * (0x8100) or QINQ (0x88A8); otherwise, don't proceed further.
+ * If it is, reset the outer ethertype, so that it can be replaced
+ * by the innermost ethertype. Note that hardware will
+ * automatically match against VLAN or QINQ packets, based on the
+ * 'ivlan_vld' or 'ovlan_vld' bit set in the Chelsio filter spec,
+ * respectively. See the example after this function.
+ */
+ if (fs->val.ethtype != RTE_ETHER_TYPE_VLAN &&
+ fs->val.ethtype != RTE_ETHER_TYPE_QINQ)
+ return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Ethertype must be 0x8100 or 0x88a8");
+
+ if (fs->val.ethtype == RTE_ETHER_TYPE_QINQ) {
+ CXGBE_FILL_FS(1, 1, ovlan_vld);
+ if (spec) {
+ CXGBE_FILL_FS(be16_to_cpu(spec->tci),
+ be16_to_cpu(mask->tci), ovlan);
+
+ fs->mask.ethtype = 0;
+ fs->val.ethtype = 0;
+ }
+ } else if (fs->val.ethtype == RTE_ETHER_TYPE_VLAN) {
+ CXGBE_FILL_FS(1, 1, ivlan_vld);
+ if (spec) {
+ CXGBE_FILL_FS(be16_to_cpu(spec->tci),
+ be16_to_cpu(mask->tci), ivlan);
+
+ fs->mask.ethtype = 0;
+ fs->val.ethtype = 0;
+ }
+ }
+
+ if (spec)
+ CXGBE_FILL_FS(be16_to_cpu(spec->inner_type),
+ be16_to_cpu(mask->inner_type), ethtype);
+
+ return 0;
+}
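+
+/*
+ * Example (illustrative): match VLAN ID 100 carrying IPv4 (testpmd
+ * syntax). The eth item must pin the outer ethertype to 0x8100 first;
+ * this parser then sets 'ivlan_vld', matches the TCI, and lets the
+ * inner ethertype (0x0800) take over the ethtype slot:
+ *
+ * flow create 0 ingress pattern eth type is 0x8100 /
+ *      vlan vid is 100 inner_type is 0x0800 / end
+ *      actions drop / end
+ */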
+
+static int
+ch_rte_parsetype_pf(const void *dmask __rte_unused,
+ const struct rte_flow_item *item __rte_unused,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e __rte_unused)
+{
+ struct rte_flow *flow = (struct rte_flow *)fs->private;
+ struct rte_eth_dev *dev = flow->dev;
+ struct adapter *adap = ethdev2adap(dev);
+
+ CXGBE_FILL_FS(1, 1, pfvf_vld);
+
+ CXGBE_FILL_FS(adap->pf, 0x7, pf);
+ return 0;
+}
+
+static int
+ch_rte_parsetype_vf(const void *dmask, const struct rte_flow_item *item,
+ struct ch_filter_specification *fs,
+ struct rte_flow_error *e)
+{
+ const struct rte_flow_item_vf *umask = item->mask;
+ const struct rte_flow_item_vf *val = item->spec;
+ const struct rte_flow_item_vf *mask;
+
+ /* If user has not given any mask, then use Chelsio supported mask. */
+ mask = umask ? umask : (const struct rte_flow_item_vf *)dmask;
+
+ CXGBE_FILL_FS(1, 1, pfvf_vld);
+
+ if (!val)
+ return 0; /* Wildcard, match all VFs */
+
+ if (val->id > UCHAR_MAX)
+ return rte_flow_error_set(e, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VF ID > MAX(255)");
+
+ CXGBE_FILL_FS(val->id, mask->id, vf);
+
+ return 0;
+}
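+
+/*
+ * Example (illustrative): match traffic destined to VF 2 of this PF
+ * (testpmd syntax):
+ *
+ * flow create 0 ingress pattern vf id is 2 / end
+ *      actions queue index 0 / end
+ */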
+