/*
 * Bookkeeping entry for one non-VOID pattern item parsed while building
 * an encap. header, so that item masks can be applied to the assembled
 * header in a separate pass after all items have been copied in.
 */
struct sfc_mae_parsed_item {
	/* The pattern item this entry was created from */
	const struct rte_flow_item	*item;
	/*
	 * Offset of the corresponding protocol header in the bounce
	 * buffer; not read in this chunk — presumably consumed by other
	 * code (TODO: confirm against the rest of the file).
	 */
	size_t				proto_header_ofst;
	/* Size of the protocol header copied from the item spec */
	size_t				proto_header_size;
};
+
+/*
+ * For each 16-bit word of the given header, override
+ * bits enforced by the corresponding 16-bit mask.
+ */
+static void
+sfc_mae_header_force_item_masks(uint8_t *header_buf,
+ const struct sfc_mae_parsed_item *parsed_items,
+ unsigned int nb_parsed_items)
+{
+ unsigned int item_idx;
+
+ for (item_idx = 0; item_idx < nb_parsed_items; ++item_idx) {
+ const struct sfc_mae_parsed_item *parsed_item;
+ const struct rte_flow_item *item;
+ size_t proto_header_size;
+ size_t ofst;
+
+ parsed_item = &parsed_items[item_idx];
+ proto_header_size = parsed_item->proto_header_size;
+ item = parsed_item->item;
+
+ for (ofst = 0; ofst < proto_header_size;
+ ofst += sizeof(rte_be16_t)) {
+ rte_be16_t *wp = RTE_PTR_ADD(header_buf, ofst);
+ const rte_be16_t *w_maskp;
+ const rte_be16_t *w_specp;
+
+ w_maskp = RTE_PTR_ADD(item->mask, ofst);
+ w_specp = RTE_PTR_ADD(item->spec, ofst);
+
+ *wp &= ~(*w_maskp);
+ *wp |= (*w_specp & *w_maskp);
+ }
+
+ header_buf += proto_header_size;
+ }
+}
+
/*
 * Default values written into the encap. header after parsing; item
 * masks are applied afterwards, so spec-provided bits override these.
 */
#define SFC_IPV4_TTL_DEF	0x40		/* IPv4 TTL = 64 */
#define SFC_IPV6_VTC_FLOW_DEF	0x60000000	/* IPv6 version = 6 */
#define SFC_IPV6_HOP_LIMITS_DEF	0xff		/* IPv6 hop limit = 255 */
#define SFC_VXLAN_FLAGS_DEF	0x08000000	/* VXLAN I flag (valid VNI) */
+
/*
 * Parse action VXLAN_ENCAP: assemble the binary encap. header described
 * by the flow item pattern in @conf into the adapter-level bounce buffer
 * (@mae->bounce_eh) and request action ENCAP in the action set @spec.
 *
 * The accepted pattern is
 *   ETH / [VLAN [/ VLAN]] / (IPV4 | IPV6) / UDP / VXLAN / END
 * with VOID items allowed anywhere. Item specs are copied verbatim,
 * selected defaults are then filled in, and finally the item masks are
 * enforced so that masked spec bits win over the defaults.
 *
 * Returns 0 on success or an error set via rte_flow_error_set().
 */
static int
sfc_mae_rule_parse_action_vxlan_encap(
			    struct sfc_mae *mae,
			    const struct rte_flow_action_vxlan_encap *conf,
			    efx_mae_actions_t *spec,
			    struct rte_flow_error *error)
{
	struct sfc_mae_bounce_eh *bounce_eh = &mae->bounce_eh;
	struct rte_flow_item *pattern = conf->definition;
	uint8_t *buf = bounce_eh->buf;

	/* This array will keep track of non-VOID pattern items. */
	struct sfc_mae_parsed_item parsed_items[1 /* Ethernet */ +
						2 /* VLAN tags */ +
						1 /* IPv4 or IPv6 */ +
						1 /* UDP */ +
						1 /* VXLAN */];
	unsigned int nb_parsed_items = 0;

	size_t eth_ethertype_ofst = offsetof(struct rte_ether_hdr, ether_type);
	/*
	 * Scratch area: exactly one of (ipv4, ipv6) will be re-pointed
	 * into the real buffer below; the other keeps pointing here so
	 * the unconditional default-field writes after the loop are
	 * harmless for the L3 protocol that is not in use.
	 */
	uint8_t dummy_buf[RTE_MAX(sizeof(struct rte_ipv4_hdr),
				  sizeof(struct rte_ipv6_hdr))];
	struct rte_ipv4_hdr *ipv4 = (void *)dummy_buf;
	struct rte_ipv6_hdr *ipv6 = (void *)dummy_buf;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_udp_hdr *udp = NULL;
	unsigned int nb_vlan_tags = 0;
	/* Offsets (in buf) to back-patch when the next layer is known */
	size_t next_proto_ofst = 0;
	size_t ethertype_ofst = 0;
	/* Bitmask of item types acceptable at the current position */
	uint64_t exp_items;
	int rc;

	if (pattern == NULL) {
		return rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
				"The encap. header definition is NULL");
	}

	bounce_eh->type = EFX_TUNNEL_PROTOCOL_VXLAN;
	bounce_eh->size = 0;

	/*
	 * Process pattern items and remember non-VOID ones.
	 * Defer applying masks until after the complete header
	 * has been built from the pattern items.
	 */
	exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_ETH);

	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; ++pattern) {
		struct sfc_mae_parsed_item *parsed_item;
		/* A second VLAN tag is allowed only after the first one */
		const uint64_t exp_items_extra_vlan[] = {
			RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN), 0
		};
		size_t proto_header_size;
		rte_be16_t *ethertypep;
		uint8_t *next_protop;
		uint8_t *buf_cur;

		if (pattern->spec == NULL) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"NULL item spec in the encap. header");
		}

		if (pattern->mask == NULL) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"NULL item mask in the encap. header");
		}

		if (pattern->last != NULL) {
			/* This is not a match pattern, so disallow range. */
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Range item in the encap. header");
		}

		if (pattern->type == RTE_FLOW_ITEM_TYPE_VOID) {
			/* Handle VOID separately, for clarity. */
			continue;
		}

		if ((exp_items & RTE_BIT64(pattern->type)) == 0) {
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Unexpected item in the encap. header");
		}

		parsed_item = &parsed_items[nb_parsed_items];
		buf_cur = buf + bounce_eh->size;

		switch (pattern->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			/*
			 * Presumably a build-time check that the item type
			 * fits in the 64-bit exp_items mask — confirm
			 * against the macro definition.
			 */
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_ETH,
					       exp_items);
			/* Item spec must start with the raw header. */
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_eth,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ether_hdr);

			ethertype_ofst = eth_ethertype_ofst;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VLAN) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VLAN,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vlan,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_vlan_hdr);

			/*
			 * On (each) VLAN tag, force the outermost ethertype
			 * to QinQ; the previous tag (or the Ethernet header
			 * on the first iteration) gets ethertype VLAN.
			 */
			ethertypep = RTE_PTR_ADD(buf, eth_ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_QINQ);

			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_VLAN);

			/* This tag's ethertype is patched by the L3 item. */
			ethertype_ofst =
			    bounce_eh->size +
			    offsetof(struct rte_vlan_hdr, eth_proto);

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV4) |
				    RTE_BIT64(RTE_FLOW_ITEM_TYPE_IPV6);
			exp_items |= exp_items_extra_vlan[nb_vlan_tags];

			++nb_vlan_tags;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV4,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv4,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ipv4_hdr);

			/* Back-patch the preceding layer's ethertype. */
			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV4);

			next_proto_ofst =
			    bounce_eh->size +
			    offsetof(struct rte_ipv4_hdr, next_proto_id);

			/* Point ipv4 at the real header for fix-ups below. */
			ipv4 = (struct rte_ipv4_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_IPV6,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_ipv6,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_ipv6_hdr);

			/* Back-patch the preceding layer's ethertype. */
			ethertypep = RTE_PTR_ADD(buf, ethertype_ofst);
			*ethertypep = RTE_BE16(RTE_ETHER_TYPE_IPV6);

			next_proto_ofst = bounce_eh->size +
					  offsetof(struct rte_ipv6_hdr, proto);

			/* Point ipv6 at the real header for fix-ups below. */
			ipv6 = (struct rte_ipv6_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_UDP);
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_UDP,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_udp,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_udp_hdr);

			/* Back-patch the L3 header's next-protocol field. */
			next_protop = RTE_PTR_ADD(buf, next_proto_ofst);
			*next_protop = IPPROTO_UDP;

			udp = (struct rte_udp_hdr *)buf_cur;

			exp_items = RTE_BIT64(RTE_FLOW_ITEM_TYPE_VXLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ITEM_TYPE_VXLAN,
					       exp_items);
			RTE_BUILD_BUG_ON(offsetof(struct rte_flow_item_vxlan,
						  hdr) != 0);

			proto_header_size = sizeof(struct rte_vxlan_hdr);

			vxlan = (struct rte_vxlan_hdr *)buf_cur;

			/*
			 * udp is non-NULL here: exp_items guarantees that a
			 * UDP item precedes VXLAN. Fill in UDP defaults;
			 * masked spec bits are restored by the mask pass.
			 */
			udp->dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
			udp->dgram_len = RTE_BE16(sizeof(*udp) +
						  sizeof(*vxlan));
			udp->dgram_cksum = 0;

			/* VXLAN is the innermost layer: expect END next. */
			exp_items = 0;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Unknown item in the encap. header");
		}

		if (bounce_eh->size + proto_header_size > bounce_eh->buf_size) {
			return rte_flow_error_set(error, E2BIG,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"The encap. header is too big");
		}

		/*
		 * The deferred mask pass walks the buffer in 16-bit words,
		 * so each layer's size must be even.
		 */
		if ((proto_header_size & 1) != 0) {
			return rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"Odd layer size in the encap. header");
		}

		rte_memcpy(buf_cur, pattern->spec, proto_header_size);
		bounce_eh->size += proto_header_size;

		parsed_item->item = pattern;
		parsed_item->proto_header_size = proto_header_size;
		++nb_parsed_items;
	}

	if (exp_items != 0) {
		/* Parsing item VXLAN would have reset exp_items to 0. */
		return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					"No item VXLAN in the encap. header");
	}

	/* One of the pointers (ipv4, ipv6) refers to a dummy area. */
	ipv4->version_ihl = RTE_IPV4_VHL_DEF;
	ipv4->time_to_live = SFC_IPV4_TTL_DEF;
	ipv4->total_length = RTE_BE16(sizeof(*ipv4) + sizeof(*udp) +
				      sizeof(*vxlan));
	/* The HW cannot compute this checksum. */
	ipv4->hdr_checksum = 0;
	ipv4->hdr_checksum = rte_ipv4_cksum(ipv4);

	ipv6->vtc_flow = RTE_BE32(SFC_IPV6_VTC_FLOW_DEF);
	ipv6->hop_limits = SFC_IPV6_HOP_LIMITS_DEF;
	ipv6->payload_len = udp->dgram_len;

	vxlan->vx_flags = RTE_BE32(SFC_VXLAN_FLAGS_DEF);

	/* Take care of the masks. */
	sfc_mae_header_force_item_masks(buf, parsed_items, nb_parsed_items);

	rc = efx_mae_action_set_populate_encap(spec);
	if (rc != 0) {
		rc = rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_ACTION,
				NULL, "failed to request action ENCAP");
	}

	return rc;
}
+