#define IAVF_PHINT_GTPU_EH BIT_ULL(1)
#define IAVF_PHINT_GTPU_EH_DWN BIT_ULL(2)
#define IAVF_PHINT_GTPU_EH_UP BIT_ULL(3)
+/* Outer L3 pattern hints; only recorded when no GTPU hint is set
+ * (see the IAVF_PHINT_GTPU_MSK guard in iavf_hash_parse_pattern).
+ */
+#define IAVF_PHINT_OUTER_IPV4 BIT_ULL(4)
+#define IAVF_PHINT_OUTER_IPV6 BIT_ULL(5)
#define IAVF_PHINT_GTPU_MSK (IAVF_PHINT_GTPU | \
IAVF_PHINT_GTPU_EH | \
IAVF_PHINT_GTPU_EH_DWN | \
IAVF_PHINT_GTPU_EH_UP)
+/* Mask of all outer-layer (non-tunnel) pattern hints. */
+#define IAVF_PHINT_LAYERS_MSK (IAVF_PHINT_OUTER_IPV4 | \
+ IAVF_PHINT_OUTER_IPV6)
+
#define IAVF_GTPU_EH_DWNLINK 0
#define IAVF_GTPU_EH_UPLINK 1
}
static int
-iavf_hash_parse_pattern(struct iavf_pattern_match_item *pattern_match_item,
- const struct rte_flow_item pattern[], uint64_t *phint,
+iavf_hash_parse_pattern(const struct rte_flow_item pattern[], uint64_t *phint,
struct rte_flow_error *error)
{
const struct rte_flow_item *item = pattern;
}
switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ if (!(*phint & IAVF_PHINT_GTPU_MSK))
+ *phint |= IAVF_PHINT_OUTER_IPV4;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ if (!(*phint & IAVF_PHINT_GTPU_MSK))
+ *phint |= IAVF_PHINT_OUTER_IPV6;
+ break;
case RTE_FLOW_ITEM_TYPE_GTPU:
*phint |= IAVF_PHINT_GTPU;
break;
}
}
- /* update and restore pattern hint */
- *phint |= *(uint64_t *)(pattern_match_item->meta);
-
return 0;
}
#define REFINE_PROTO_FLD(op, fld) \
VIRTCHNL_##op##_PROTO_HDR_FIELD(hdr, VIRTCHNL_PROTO_HDR_##fld)
+/* Replace one proto header field with another in 'hdr':
+ * delete fld_1's selector bit, then add fld_2's.
+ * NOTE(review): "REPALCE" is a misspelling of "REPLACE"; the name is
+ * kept because call sites elsewhere in this patch use this spelling —
+ * rename macro and all callers together in a follow-up.
+ */
+#define REPALCE_PROTO_FLD(fld_1, fld_2) \
+do { \
+ REFINE_PROTO_FLD(DEL, fld_1); \
+ REFINE_PROTO_FLD(ADD, fld_2); \
+} while (0)
/* refine proto hdrs base on l2, l3, l4 rss type */
static void
} else {
hdr->field_selector = 0;
}
+ if (rss_type & RTE_ETH_RSS_L3_PRE64) {
+ if (REFINE_PROTO_FLD(TEST, IPV6_SRC))
+ REPALCE_PROTO_FLD(IPV6_SRC,
+ IPV6_PREFIX64_SRC);
+ if (REFINE_PROTO_FLD(TEST, IPV6_DST))
+ REPALCE_PROTO_FLD(IPV6_DST,
+ IPV6_PREFIX64_DST);
+ }
break;
case VIRTCHNL_PROTO_HDR_UDP:
if (rss_type &
{
struct virtchnl_proto_hdr *hdr1;
struct virtchnl_proto_hdr *hdr2;
- int i;
+ int i, shift_count = 1;
if (!(phint & IAVF_PHINT_GTPU_MSK))
return;
+ if (phint & IAVF_PHINT_LAYERS_MSK)
+ shift_count++;
+
if (proto_hdrs->tunnel_level == TUNNEL_LEVEL_INNER) {
- /* shift headers 1 layer */
- for (i = proto_hdrs->count; i > 0; i--) {
+ /* shift headers down by shift_count layers */
+ for (i = proto_hdrs->count - 1 + shift_count;
+ i > shift_count - 1; i--) {
hdr1 = &proto_hdrs->proto_hdr[i];
- hdr2 = &proto_hdrs->proto_hdr[i - 1];
-
+ hdr2 = &proto_hdrs->proto_hdr[i - shift_count];
*hdr1 = *hdr2;
}
- /* adding gtpu header at layer 0 */
- hdr1 = &proto_hdrs->proto_hdr[0];
+ if (shift_count == 1) {
+ /* adding gtpu header at layer 0 */
+ hdr1 = &proto_hdrs->proto_hdr[0];
+ } else {
+ /* adding gtpu header and outer ip header */
+ hdr1 = &proto_hdrs->proto_hdr[1];
+ hdr2 = &proto_hdrs->proto_hdr[0];
+ hdr2->field_selector = 0;
+ proto_hdrs->count++;
+ proto_hdrs->tunnel_level = TUNNEL_LEVEL_OUTER;
+
+ if (phint & IAVF_PHINT_OUTER_IPV4)
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr2, IPV4);
+ else if (phint & IAVF_PHINT_OUTER_IPV6)
+ VIRTCHNL_SET_PROTO_HDR_TYPE(hdr2, IPV6);
+ }
} else {
hdr1 = &proto_hdrs->proto_hdr[proto_hdrs->count];
}
RTE_ETH_RSS_L3_PRE96
};
+/* Pairs an RSS attribute flag set with the RSS types it may legally
+ * accompany: if any bit of 'attr' is requested, at least one bit of
+ * 'type' must be requested too (checked in iavf_any_invalid_rss_type).
+ */
+struct rss_attr_type {
+ uint64_t attr;
+ uint64_t type;
+};
+
+#define VALID_RSS_IPV4_L4 (ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_SCTP)
+
+#define VALID_RSS_IPV6_L4 (ETH_RSS_NONFRAG_IPV6_UDP | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_SCTP)
+
+#define VALID_RSS_IPV4 (ETH_RSS_IPV4 | VALID_RSS_IPV4_L4)
+#define VALID_RSS_IPV6 (ETH_RSS_IPV6 | VALID_RSS_IPV6_L4)
+#define VALID_RSS_L3 (VALID_RSS_IPV4 | VALID_RSS_IPV6)
+#define VALID_RSS_L4 (VALID_RSS_IPV4_L4 | VALID_RSS_IPV6_L4)
+
+/* Attribute bits understood by this PMD; they are stripped from
+ * rss_type before the allowed-type comparison.
+ */
+#define VALID_RSS_ATTR (ETH_RSS_L3_SRC_ONLY | \
+ ETH_RSS_L3_DST_ONLY | \
+ ETH_RSS_L4_SRC_ONLY | \
+ ETH_RSS_L4_DST_ONLY | \
+ ETH_RSS_L2_SRC_ONLY | \
+ ETH_RSS_L2_DST_ONLY | \
+ RTE_ETH_RSS_L3_PRE64)
+
+/* IPv6 prefix lengths other than 64 bits are always rejected. */
+#define INVALID_RSS_ATTR (RTE_ETH_RSS_L3_PRE32 | \
+ RTE_ETH_RSS_L3_PRE40 | \
+ RTE_ETH_RSS_L3_PRE48 | \
+ RTE_ETH_RSS_L3_PRE56 | \
+ RTE_ETH_RSS_L3_PRE96)
+
+static struct rss_attr_type rss_attr_to_valid_type[] = {
+ {ETH_RSS_L2_SRC_ONLY | ETH_RSS_L2_DST_ONLY, ETH_RSS_ETH},
+ {ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY, VALID_RSS_L3},
+ {ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY, VALID_RSS_L4},
+ /* Currently the IPv6 prefix attribute only supports a 64-bit prefix. */
+ {RTE_ETH_RSS_L3_PRE64, VALID_RSS_IPV6},
+ /* Sentinel: invalid attributes map to no valid type, so any of them
+ * present in rss_type makes the whole request invalid.
+ */
+ {INVALID_RSS_ATTR, 0}
+};
+
static bool
iavf_any_invalid_rss_type(uint64_t rss_type, uint64_t allow_rss_type)
{
return true;
}
+ /* check invalid RSS attribute */
+ for (i = 0; i < RTE_DIM(rss_attr_to_valid_type); i++) {
+ struct rss_attr_type *rat = &rss_attr_to_valid_type[i];
+
+ if (rat->attr & rss_type && !(rat->type & rss_type))
+ return true;
+ }
+
/* check not allowed RSS type */
-#define _RSS_ATTR_ (ETH_RSS_L3_SRC_ONLY | \
- ETH_RSS_L3_DST_ONLY | \
- ETH_RSS_L4_SRC_ONLY | \
- ETH_RSS_L4_DST_ONLY | \
- ETH_RSS_L2_SRC_ONLY | \
- ETH_RSS_L2_DST_ONLY | \
- RTE_ETH_RSS_L3_PRE32 | \
- RTE_ETH_RSS_L3_PRE40 | \
- RTE_ETH_RSS_L3_PRE48 | \
- RTE_ETH_RSS_L3_PRE56 | \
- RTE_ETH_RSS_L3_PRE64 | \
- RTE_ETH_RSS_L3_PRE96)
-
- rss_type &= ~_RSS_ATTR_;
+ rss_type &= ~VALID_RSS_ATTR;
return ((rss_type & allow_rss_type) != rss_type);
}
goto error;
}
- ret = iavf_hash_parse_pattern(pattern_match_item, pattern, &phint,
- error);
+ ret = iavf_hash_parse_pattern(pattern, &phint, error);
if (ret)
goto error;
struct rte_flow *flow,
__rte_unused struct rte_flow_error *error)
{
+ struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
struct virtchnl_rss_cfg *rss_cfg;
int ret = 0;
+ if (vf->vf_reset)
+ return 0;
+
rss_cfg = (struct virtchnl_rss_cfg *)flow->rule;
ret = iavf_add_del_rss_cfg(ad, rss_cfg, false);