+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, items,
+ "inner tunnel port id"
+ " item is not supported");
+ mask.port_id = flow_tcf_item_mask
+ (items, &rte_flow_item_port_id_mask,
+ &flow_tcf_mask_supported.port_id,
+ &flow_tcf_mask_empty.port_id,
+ sizeof(flow_tcf_mask_supported.port_id),
+ error);
+ if (!mask.port_id)
+ return -rte_errno;
+ if (mask.port_id == &flow_tcf_mask_empty.port_id) {
+ in_port_id_set = 1;
+ break;
+ }
+ spec.port_id = items->spec;
+ if (mask.port_id->id && mask.port_id->id != 0xffffffff)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.port_id,
+ "no support for partial mask on"
+ " \"id\" field");
+ if (!mask.port_id->id)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == spec.port_id->id)
+ break;
+ if (!ptoi[i].ifindex)
+ return rte_flow_error_set
+ (error, ENODEV,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ spec.port_id,
+ "missing data to convert port ID to"
+ " ifindex");
+ if (in_port_id_set && ptoi[i].ifindex != tcm_ifindex)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ spec.port_id,
+ "cannot match traffic for"
+ " several port IDs through"
+ " a single flow rule");
+ tcm_ifindex = ptoi[i].ifindex;
+ in_port_id_set = 1;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_validate_item_eth(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ /* TODO:
+ * Redundant check due to different supported mask.
+			 * The same applies to the rest of the items.
+ */
+ mask.eth = flow_tcf_item_mask
+ (items, &rte_flow_item_eth_mask,
+ &flow_tcf_mask_supported.eth,
+ &flow_tcf_mask_empty.eth,
+ sizeof(flow_tcf_mask_supported.eth),
+ error);
+ if (!mask.eth)
+ return -rte_errno;
+ if (mask.eth->type && mask.eth->type !=
+ RTE_BE16(0xffff))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.eth,
+ "no support for partial mask on"
+ " \"type\" field");
+ assert(items->spec);
+ spec.eth = items->spec;
+ if (mask.eth->type &&
+ (item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
+ inner_etype != RTE_BE16(ETH_P_ALL) &&
+ inner_etype != spec.eth->type)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "inner eth_type conflict");
+ if (mask.eth->type &&
+ !(item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
+ outer_etype != RTE_BE16(ETH_P_ALL) &&
+ outer_etype != spec.eth->type)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "outer eth_type conflict");
+ if (mask.eth->type) {
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ inner_etype = spec.eth->type;
+ else
+ outer_etype = spec.eth->type;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, items,
+ "inner tunnel VLAN"
+ " is not supported");
+ ret = mlx5_flow_validate_item_vlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_OUTER_VLAN;
+ mask.vlan = flow_tcf_item_mask
+ (items, &rte_flow_item_vlan_mask,
+ &flow_tcf_mask_supported.vlan,
+ &flow_tcf_mask_empty.vlan,
+ sizeof(flow_tcf_mask_supported.vlan),
+ error);
+ if (!mask.vlan)
+ return -rte_errno;
+ if ((mask.vlan->tci & RTE_BE16(0xe000) &&
+ (mask.vlan->tci & RTE_BE16(0xe000)) !=
+ RTE_BE16(0xe000)) ||
+ (mask.vlan->tci & RTE_BE16(0x0fff) &&
+ (mask.vlan->tci & RTE_BE16(0x0fff)) !=
+ RTE_BE16(0x0fff)) ||
+ (mask.vlan->inner_type &&
+ mask.vlan->inner_type != RTE_BE16(0xffff)))
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.vlan,
+ "no support for partial masks on"
+ " \"tci\" (PCP and VID parts) and"
+ " \"inner_type\" fields");
+ if (outer_etype != RTE_BE16(ETH_P_ALL) &&
+ outer_etype != RTE_BE16(ETH_P_8021Q))
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "outer eth_type conflict,"
+ " must be 802.1Q");
+ outer_etype = RTE_BE16(ETH_P_8021Q);
+ assert(items->spec);
+ spec.vlan = items->spec;
+ if (mask.vlan->inner_type &&
+ vlan_etype != RTE_BE16(ETH_P_ALL) &&
+ vlan_etype != spec.vlan->inner_type)
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "vlan eth_type conflict");
+ if (mask.vlan->inner_type)
+ vlan_etype = spec.vlan->inner_type;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ ret = mlx5_flow_validate_item_ipv4
+ (items, item_flags,
+ &flow_tcf_mask_supported.ipv4, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ mask.ipv4 = flow_tcf_item_mask
+ (items, &rte_flow_item_ipv4_mask,
+ &flow_tcf_mask_supported.ipv4,
+ &flow_tcf_mask_empty.ipv4,
+ sizeof(flow_tcf_mask_supported.ipv4),
+ error);
+ if (!mask.ipv4)
+ return -rte_errno;
+ if (mask.ipv4->hdr.next_proto_id &&
+ mask.ipv4->hdr.next_proto_id != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.ipv4,
+ "no support for partial mask on"
+ " \"hdr.next_proto_id\" field");
+ else if (mask.ipv4->hdr.next_proto_id)
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
+ if (inner_etype != RTE_BE16(ETH_P_ALL) &&
+ inner_etype != RTE_BE16(ETH_P_IP))
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "inner eth_type conflict,"
+ " IPv4 is required");
+ inner_etype = RTE_BE16(ETH_P_IP);
+ } else if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN) {
+ if (vlan_etype != RTE_BE16(ETH_P_ALL) &&
+ vlan_etype != RTE_BE16(ETH_P_IP))
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "vlan eth_type conflict,"
+ " IPv4 is required");
+ vlan_etype = RTE_BE16(ETH_P_IP);
+ } else {
+ if (outer_etype != RTE_BE16(ETH_P_ALL) &&
+ outer_etype != RTE_BE16(ETH_P_IP))
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "eth_type conflict,"
+ " IPv4 is required");
+ outer_etype = RTE_BE16(ETH_P_IP);
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ ret = mlx5_flow_validate_item_ipv6
+ (items, item_flags,
+ &flow_tcf_mask_supported.ipv6, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ mask.ipv6 = flow_tcf_item_mask
+ (items, &rte_flow_item_ipv6_mask,
+ &flow_tcf_mask_supported.ipv6,
+ &flow_tcf_mask_empty.ipv6,
+ sizeof(flow_tcf_mask_supported.ipv6),
+ error);
+ if (!mask.ipv6)
+ return -rte_errno;
+ if (mask.ipv6->hdr.proto &&
+ mask.ipv6->hdr.proto != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.ipv6,
+ "no support for partial mask on"
+ " \"hdr.proto\" field");
+ else if (mask.ipv6->hdr.proto)
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ (items->spec))->hdr.proto;
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
+ if (inner_etype != RTE_BE16(ETH_P_ALL) &&
+ inner_etype != RTE_BE16(ETH_P_IPV6))
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "inner eth_type conflict,"
+ " IPv6 is required");
+ inner_etype = RTE_BE16(ETH_P_IPV6);
+ } else if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN) {
+ if (vlan_etype != RTE_BE16(ETH_P_ALL) &&
+ vlan_etype != RTE_BE16(ETH_P_IPV6))
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "vlan eth_type conflict,"
+ " IPv6 is required");
+ vlan_etype = RTE_BE16(ETH_P_IPV6);
+ } else {
+ if (outer_etype != RTE_BE16(ETH_P_ALL) &&
+ outer_etype != RTE_BE16(ETH_P_IPV6))
+ return rte_flow_error_set
+ (error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items,
+ "eth_type conflict,"
+ " IPv6 is required");
+ outer_etype = RTE_BE16(ETH_P_IPV6);
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_validate_item_udp(items, item_flags,
+ next_protocol, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ mask.udp = flow_tcf_item_mask
+ (items, &rte_flow_item_udp_mask,
+ &flow_tcf_mask_supported.udp,
+ &flow_tcf_mask_empty.udp,
+ sizeof(flow_tcf_mask_supported.udp),
+ error);
+ if (!mask.udp)
+ return -rte_errno;
+ /*
+			 * Save the presumed outer UDP item for an extra check
+			 * if a tunnel item is found later in the list.
+ */
+ if (!(item_flags & MLX5_FLOW_LAYER_TUNNEL))
+ outer_udp = items;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_validate_item_tcp
+ (items, item_flags,
+ next_protocol,
+ &flow_tcf_mask_supported.tcp,
+ error);
+ if (ret < 0)
+ return ret;
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ mask.tcp = flow_tcf_item_mask
+ (items, &rte_flow_item_tcp_mask,
+ &flow_tcf_mask_supported.tcp,
+ &flow_tcf_mask_empty.tcp,
+ sizeof(flow_tcf_mask_supported.tcp),
+ error);
+ if (!mask.tcp)
+ return -rte_errno;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ if (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM, items,
+ "vxlan tunnel over vlan"
+ " is not supported");
+ ret = mlx5_flow_validate_item_vxlan(items,
+ item_flags, error);
+ if (ret < 0)
+ return ret;
+ item_flags |= MLX5_FLOW_LAYER_VXLAN;
+ mask.vxlan = flow_tcf_item_mask
+ (items, &rte_flow_item_vxlan_mask,
+ &flow_tcf_mask_supported.vxlan,
+ &flow_tcf_mask_empty.vxlan,
+ sizeof(flow_tcf_mask_supported.vxlan), error);
+ if (!mask.vxlan)
+ return -rte_errno;
+ if (mask.vxlan->vni[0] != 0xff ||
+ mask.vxlan->vni[1] != 0xff ||
+ mask.vxlan->vni[2] != 0xff)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
+ mask.vxlan,
+ "no support for partial or "
+ "empty mask on \"vxlan.vni\" field");
+ /*
+			 * The VNI item implies a VXLAN tunnel. It requires at
+			 * least the outer destination UDP port to be specified
+			 * without wildcards to allow the kernel to select the
+			 * virtual VXLAN device by port. Also an outer IPv4 or
+			 * IPv6 item must be specified (wildcards or even a
+			 * zero mask are allowed) to let the driver know the
+			 * tunnel IP version and process UDP traffic correctly.
+ */
+ if (!(item_flags &
+ (MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
+ return rte_flow_error_set
+ (error, EINVAL,
+					 RTE_FLOW_ERROR_TYPE_ITEM,
+					 items,
+ "no outer IP pattern found"
+ " for vxlan tunnel");
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+ return rte_flow_error_set
+ (error, EINVAL,
+					 RTE_FLOW_ERROR_TYPE_ITEM,
+					 items,
+ "no outer UDP pattern found"
+ " for vxlan tunnel");
+ /*
+ * All items preceding the tunnel item become outer
+ * ones and we should do extra validation for them
+ * due to tc limitations for tunnel outer parameters.
+			 * Currently only the outer UDP item requires an
+			 * extra check, use the saved pointer instead of
+			 * rescanning the item list.
+ */
+ assert(outer_udp);
+ ret = flow_tcf_validate_vxlan_decap_udp
+ (outer_udp, error);
+ if (ret < 0)
+ return ret;
+ /* Reset L4 protocol for inner parameters. */
+ next_protocol = 0xff;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ items, "item not supported");
+ }
+ }
+ if ((action_flags & MLX5_TCF_PEDIT_ACTIONS) &&
+ (action_flags & MLX5_FLOW_ACTION_DROP))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "set action is not compatible with "
+ "drop action");
+ if ((action_flags & MLX5_TCF_PEDIT_ACTIONS) &&
+ !(action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "set action must be followed by "
+ "port_id action");
+ if (action_flags &
+ (MLX5_FLOW_ACTION_SET_IPV4_SRC | MLX5_FLOW_ACTION_SET_IPV4_DST)) {
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no ipv4 item found in"
+ " pattern");
+ }
+ if (action_flags &
+ (MLX5_FLOW_ACTION_SET_IPV6_SRC | MLX5_FLOW_ACTION_SET_IPV6_DST)) {
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV6))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no ipv6 item found in"
+ " pattern");
+ }
+ if (action_flags &
+ (MLX5_FLOW_ACTION_SET_TP_SRC | MLX5_FLOW_ACTION_SET_TP_DST)) {
+ if (!(item_flags &
+ (MLX5_FLOW_LAYER_OUTER_L4_UDP |
+ MLX5_FLOW_LAYER_OUTER_L4_TCP)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no TCP/UDP item found in"
+ " pattern");
+ }
+ /*
+ * FW syndrome (0xA9C090):
+ * set_flow_table_entry: push vlan action fte in fdb can ONLY be
+ * forward to the uplink.
+ */
+ if ((action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN) &&
+ (action_flags & MLX5_FLOW_ACTION_PORT_ID) &&
+ ((struct mlx5_priv *)port_id_dev->data->dev_private)->representor)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "vlan push can only be applied"
+ " when forwarding to uplink port");
+ /*
+ * FW syndrome (0x294609):
+ * set_flow_table_entry: modify/pop/push actions in fdb flow table
+ * are supported only while forwarding to vport.
+ */
+ if ((action_flags & MLX5_TCF_VLAN_ACTIONS) &&
+ !(action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "vlan actions are supported"
+ " only with port_id action");
+ if ((action_flags & MLX5_TCF_VXLAN_ACTIONS) &&
+ !(action_flags & MLX5_FLOW_ACTION_PORT_ID))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "vxlan actions are supported"
+ " only with port_id action");
+ if (!(action_flags & MLX5_TCF_FATE_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "no fate action is found");
+ if (action_flags &
+ (MLX5_FLOW_ACTION_SET_TTL | MLX5_FLOW_ACTION_DEC_TTL)) {
+ if (!(item_flags &
+ (MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no IP found in pattern");
+ }
+ if (action_flags &
+ (MLX5_FLOW_ACTION_SET_MAC_SRC | MLX5_FLOW_ACTION_SET_MAC_DST)) {
+ if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L2))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no ethernet found in"
+ " pattern");
+ }
+ if ((action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP) &&
+ !(item_flags & MLX5_FLOW_LAYER_VXLAN))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "no VNI pattern found"
+ " for vxlan decap action");
+ if ((action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP) &&
+ (item_flags & MLX5_FLOW_LAYER_TUNNEL))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "vxlan encap not supported"
+ " for tunneled traffic");
+ return 0;
+}
+
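+/*
+ * Illustrative example (not part of the driver): a flow accepted by the
+ * validation above could be expressed in testpmd as
+ *
+ *   flow create 0 transfer ingress
+ *     pattern eth / ipv4 / udp dst is 4789 / vxlan vni is 100 / end
+ *     actions vxlan_decap / port_id id 1 / end
+ *
+ * The fully masked outer UDP destination port and VNI are mandatory per
+ * the checks above; the port, VNI and port_id values are hypothetical.
+ */
+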
+/**
+ * Calculate maximum size of memory for flow items of Linux TC flower.
+ *
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[out] action_flags
+ * Pointer to the detected actions.
+ *
+ * @return
+ * Maximum size of memory for items.
+ */
+static int
+flow_tcf_get_items_size(const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ uint64_t *action_flags)
+{
+ int size = 0;
+
+ size += SZ_NLATTR_STRZ_OF("flower") +
+ SZ_NLATTR_TYPE_OF(uint16_t) + /* Outer ether type. */
+ SZ_NLATTR_NEST + /* TCA_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(uint32_t); /* TCA_CLS_FLAGS_SKIP_SW. */
+ if (attr->group > 0)
+ size += SZ_NLATTR_TYPE_OF(uint32_t); /* TCA_CHAIN. */
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ size += SZ_NLATTR_DATA_OF(RTE_ETHER_ADDR_LEN) * 4;
+ /* dst/src MAC addr and mask. */
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ size += SZ_NLATTR_TYPE_OF(uint16_t) +
+ /* VLAN Ether type. */
+ SZ_NLATTR_TYPE_OF(uint8_t) + /* VLAN prio. */
+ SZ_NLATTR_TYPE_OF(uint16_t); /* VLAN ID. */
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4: {
+ const struct rte_flow_item_ipv4 *ipv4 = items->mask;
+
+ size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
+ SZ_NLATTR_TYPE_OF(uint32_t) * 4;
+ /* dst/src IP addr and mask. */
+ if (ipv4 && ipv4->hdr.time_to_live)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ if (ipv4 && ipv4->hdr.type_of_service)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ break;
+ }
+ case RTE_FLOW_ITEM_TYPE_IPV6: {
+ const struct rte_flow_item_ipv6 *ipv6 = items->mask;
+
+ size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
+ SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN) * 4;
+ /* dst/src IP addr and mask. */
+ if (ipv6 && ipv6->hdr.hop_limits)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
+ (0xfful << RTE_IPV6_HDR_TC_SHIFT)))
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ break;
+ }
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
+ SZ_NLATTR_TYPE_OF(uint16_t) * 4;
+ /* dst/src port and mask. */
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ size += SZ_NLATTR_TYPE_OF(uint8_t) + /* IP proto. */
+ SZ_NLATTR_TYPE_OF(uint16_t) * 4;
+ /* dst/src port and mask. */
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ size += SZ_NLATTR_TYPE_OF(uint32_t);
+ /*
+			 * There might be no VXLAN decap action in the action
+			 * list, nonetheless a VXLAN tunnel flow requires the
+			 * decap structure to be correctly applied to the
+			 * VXLAN device, so set the flag to create the
+			 * structure. The translation routine will not put the
+			 * decap action in the Netlink message if there is no
+			 * actual action in the list.
+ */
+ *action_flags |= MLX5_FLOW_ACTION_VXLAN_DECAP;
+ break;
+ default:
+ DRV_LOG(WARNING,
+ "unsupported item %p type %d,"
+ " items must be validated before flow creation",
+ (const void *)items, items->type);
+ break;
+ }
+ }
+ return size;
+}
+
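+/*
+ * A sizing sketch, assuming SZ_NLATTR_TYPE_OF() expands to an aligned
+ * netlink attribute header plus the aligned payload (as its name
+ * suggests): for a plain "eth / ipv4 / udp" pattern the value returned
+ * above covers the "flower" keyword, the outer ether type, the
+ * TCA_OPTIONS nest, dst/src MAC with masks, the IP protocol, dst/src IP
+ * with masks and dst/src ports with masks. The result is an upper bound,
+ * translation may emit fewer attributes than accounted for here.
+ */
+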
+/**
+ * Calculate the size of memory needed to store the VXLAN encapsulation
+ * related items in the Netlink message buffer. The item list is
+ * specified by the RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action.
+ * The item list must already be validated.
+ *
+ * @param[in] action
+ * RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action object.
+ * List of pattern items to scan data from.
+ *
+ * @return
+ * The size of the part of the Netlink message buffer needed to store
+ * the VXLAN encapsulation item attributes.
+ */
+static int
+flow_tcf_vxlan_encap_size(const struct rte_flow_action *action)
+{
+ const struct rte_flow_item *items;
+ int size = 0;
+
+ assert(action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP);
+ assert(action->conf);
+
+ items = ((const struct rte_flow_action_vxlan_encap *)
+ action->conf)->definition;
+ assert(items);
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ /* This item does not require message buffer. */
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4: {
+ const struct rte_flow_item_ipv4 *ipv4 = items->mask;
+
+ size += SZ_NLATTR_DATA_OF(IPV4_ADDR_LEN) * 2;
+ if (ipv4 && ipv4->hdr.time_to_live)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ if (ipv4 && ipv4->hdr.type_of_service)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ break;
+ }
+ case RTE_FLOW_ITEM_TYPE_IPV6: {
+ const struct rte_flow_item_ipv6 *ipv6 = items->mask;
+
+ size += SZ_NLATTR_DATA_OF(IPV6_ADDR_LEN) * 2;
+ if (ipv6 && ipv6->hdr.hop_limits)
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ if (ipv6 && (rte_be_to_cpu_32(ipv6->hdr.vtc_flow) &
+ (0xfful << RTE_IPV6_HDR_TC_SHIFT)))
+ size += SZ_NLATTR_TYPE_OF(uint8_t) * 2;
+ break;
+ }
+ case RTE_FLOW_ITEM_TYPE_UDP: {
+ const struct rte_flow_item_udp *udp = items->mask;
+
+ size += SZ_NLATTR_TYPE_OF(uint16_t);
+ if (!udp || udp->hdr.src_port != RTE_BE16(0x0000))
+ size += SZ_NLATTR_TYPE_OF(uint16_t);
+ break;
+ }
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ size += SZ_NLATTR_TYPE_OF(uint32_t);
+ break;
+ default:
+ assert(false);
+ DRV_LOG(WARNING,
+ "unsupported item %p type %d,"
+ " items must be validated"
+ " before flow creation",
+ (const void *)items, items->type);
+ return 0;
+ }
+ }
+ return size;
+}
+
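+/*
+ * Note (stated as an assumption for illustration): each field accounted
+ * above corresponds to a "tunnel_key" action attribute emitted at
+ * translation time, e.g. TCA_TUNNEL_KEY_ENC_IPV4_SRC/DST or
+ * TCA_TUNNEL_KEY_ENC_IPV6_SRC/DST for the addresses,
+ * TCA_TUNNEL_KEY_ENC_DST_PORT for the UDP destination port and
+ * TCA_TUNNEL_KEY_ENC_KEY_ID for the VNI.
+ */
+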
+/**
+ * Calculate maximum size of memory for flow actions of Linux TC flower and
+ * extract specified actions.
+ *
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] action_flags
+ * Pointer to the detected actions.
+ *
+ * @return
+ * Maximum size of memory for actions.
+ */
+static int
+flow_tcf_get_actions_and_size(const struct rte_flow_action actions[],
+ uint64_t *action_flags)
+{
+ int size = 0;
+ uint64_t flags = *action_flags;
+
+ size += SZ_NLATTR_NEST; /* TCA_FLOWER_ACT. */
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ size += SZ_NLATTR_NEST + /* na_act_index. */
+ SZ_NLATTR_STRZ_OF("mirred") +
+ SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(struct tc_mirred);
+ flags |= MLX5_FLOW_ACTION_PORT_ID;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ size += SZ_NLATTR_NEST + /* na_act_index. */
+ SZ_NLATTR_STRZ_OF("gact") +
+ SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(struct tc_gact);
+ flags |= MLX5_FLOW_ACTION_JUMP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ size += SZ_NLATTR_NEST + /* na_act_index. */
+ SZ_NLATTR_STRZ_OF("gact") +
+ SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(struct tc_gact);
+ flags |= MLX5_FLOW_ACTION_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
+ goto action_of_vlan;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ goto action_of_vlan;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
+ goto action_of_vlan;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
+ goto action_of_vlan;
+action_of_vlan:
+ size += SZ_NLATTR_NEST + /* na_act_index. */
+ SZ_NLATTR_STRZ_OF("vlan") +
+ SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(struct tc_vlan) +
+ SZ_NLATTR_TYPE_OF(uint16_t) +
+ /* VLAN protocol. */
+ SZ_NLATTR_TYPE_OF(uint16_t) + /* VLAN ID. */
+ SZ_NLATTR_TYPE_OF(uint8_t); /* VLAN prio. */
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ size += SZ_NLATTR_NEST + /* na_act_index. */
+ SZ_NLATTR_STRZ_OF("tunnel_key") +
+ SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(uint8_t);
+ size += SZ_NLATTR_TYPE_OF(struct tc_tunnel_key);
+ size += flow_tcf_vxlan_encap_size(actions) +
+ RTE_ALIGN_CEIL /* preceding encap params. */
+ (sizeof(struct flow_tcf_vxlan_encap),
+ MNL_ALIGNTO);
+ flags |= MLX5_FLOW_ACTION_VXLAN_ENCAP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ size += SZ_NLATTR_NEST + /* na_act_index. */
+ SZ_NLATTR_STRZ_OF("tunnel_key") +
+ SZ_NLATTR_NEST + /* TCA_ACT_OPTIONS. */
+ SZ_NLATTR_TYPE_OF(uint8_t);
+ size += SZ_NLATTR_TYPE_OF(struct tc_tunnel_key);
+ size += RTE_ALIGN_CEIL /* preceding decap params. */
+ (sizeof(struct flow_tcf_vxlan_decap),
+ MNL_ALIGNTO);
+ flags |= MLX5_FLOW_ACTION_VXLAN_DECAP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ size += flow_tcf_get_pedit_actions_size(&actions,
+ &flags);
+ break;
+ default:
+ DRV_LOG(WARNING,
+ "unsupported action %p type %d,"
+ " items must be validated before flow creation",
+ (const void *)actions, actions->type);
+ break;
+ }
+ }
+ *action_flags = flags;
+ return size;
+}
+
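+/*
+ * Illustrative example: for the action list "vxlan_decap / port_id" the
+ * loop above sets MLX5_FLOW_ACTION_VXLAN_DECAP | MLX5_FLOW_ACTION_PORT_ID
+ * in *action_flags and accounts for two action nests: the "tunnel_key"
+ * one (plus the preceding decap parameters block) and the "mirred" one.
+ */
+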
+/**
+ * Prepare a flow object for Linux TC flower. It calculates the maximum size of
+ * memory required, allocates the memory, initializes the Netlink message
+ * headers and sets a unique TC message handle.
+ *
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Pointer to mlx5_flow object on success,
+ * otherwise NULL and rte_errno is set.
+ */
+static struct mlx5_flow *
+flow_tcf_prepare(const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ size_t size = RTE_ALIGN_CEIL
+ (sizeof(struct mlx5_flow),
+ alignof(struct flow_tcf_tunnel_hdr)) +
+ MNL_ALIGN(sizeof(struct nlmsghdr)) +
+ MNL_ALIGN(sizeof(struct tcmsg));
+ struct mlx5_flow *dev_flow;
+ uint64_t action_flags = 0;
+ struct nlmsghdr *nlh;
+ struct tcmsg *tcm;
+ uint8_t *sp, *tun = NULL;
+
+ size += flow_tcf_get_items_size(attr, items, &action_flags);
+ size += flow_tcf_get_actions_and_size(actions, &action_flags);
+ dev_flow = rte_zmalloc(__func__, size, MNL_ALIGNTO);
+ if (!dev_flow) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not enough memory to create E-Switch flow");
+ return NULL;
+ }
+ sp = (uint8_t *)(dev_flow + 1);
+ if (action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP) {
+ sp = RTE_PTR_ALIGN
+ (sp, alignof(struct flow_tcf_tunnel_hdr));
+ tun = sp;
+ sp += RTE_ALIGN_CEIL
+ (sizeof(struct flow_tcf_vxlan_encap),
+ MNL_ALIGNTO);
+#ifndef NDEBUG
+ size -= RTE_ALIGN_CEIL
+ (sizeof(struct flow_tcf_vxlan_encap),
+ MNL_ALIGNTO);
+#endif
+ } else if (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP) {
+ sp = RTE_PTR_ALIGN
+ (sp, alignof(struct flow_tcf_tunnel_hdr));
+ tun = sp;
+ sp += RTE_ALIGN_CEIL
+ (sizeof(struct flow_tcf_vxlan_decap),
+ MNL_ALIGNTO);
+#ifndef NDEBUG
+ size -= RTE_ALIGN_CEIL
+ (sizeof(struct flow_tcf_vxlan_decap),
+ MNL_ALIGNTO);
+#endif
+ } else {
+ sp = RTE_PTR_ALIGN(sp, MNL_ALIGNTO);
+ }
+ nlh = mnl_nlmsg_put_header(sp);
+ tcm = mnl_nlmsg_put_extra_header(nlh, sizeof(*tcm));
+ *dev_flow = (struct mlx5_flow){
+ .tcf = (struct mlx5_flow_tcf){
+#ifndef NDEBUG
+ .nlsize = size - RTE_ALIGN_CEIL
+ (sizeof(struct mlx5_flow),
+ alignof(struct flow_tcf_tunnel_hdr)),
+#endif
+ .tunnel = (struct flow_tcf_tunnel_hdr *)tun,
+ .nlh = nlh,
+ .tcm = tcm,
+ },
+ };
+ if (action_flags & MLX5_FLOW_ACTION_VXLAN_DECAP)
+ dev_flow->tcf.tunnel->type = FLOW_TCF_TUNACT_VXLAN_DECAP;
+ else if (action_flags & MLX5_FLOW_ACTION_VXLAN_ENCAP)
+ dev_flow->tcf.tunnel->type = FLOW_TCF_TUNACT_VXLAN_ENCAP;
+ return dev_flow;
+}
+
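+/*
+ * Sketch of the buffer produced by flow_tcf_prepare() (alignments as
+ * computed above, the tunnel header part exists only for VXLAN
+ * encap/decap flows):
+ *
+ *   +--------------------------------+ <- rte_zmalloc() base
+ *   | struct mlx5_flow               |
+ *   +--------------------------------+ <- flow_tcf_tunnel_hdr alignment
+ *   | encap/decap parameters         |
+ *   +--------------------------------+ <- MNL_ALIGNTO alignment
+ *   | struct nlmsghdr                |
+ *   | struct tcmsg                   |
+ *   | netlink attributes ...         |
+ *   +--------------------------------+
+ */
+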
+/**
+ * Make adjustments for supporting count actions.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in] dev_flow
+ * Pointer to mlx5_flow.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_translate_action_count(struct rte_eth_dev *dev __rte_unused,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct rte_flow *flow = dev_flow->flow;
+
+ if (!flow->counter) {
+ flow->counter = flow_tcf_counter_new();
+ if (!flow->counter)
+ return rte_flow_error_set(error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot get counter"
+ " context.");
+ }
+ return 0;
+}
+
+/**
+ * Convert VXLAN VNI to 32-bit integer.
+ *
+ * @param[in] vni
+ * VXLAN VNI in 24-bit wire format.
+ *
+ * @return
+ * VXLAN VNI as a 32-bit integer value in network endianness.
+ */
+static inline rte_be32_t
+vxlan_vni_as_be32(const uint8_t vni[3])
+{
+ union {
+ uint8_t vni[4];
+ rte_be32_t dword;
+ } ret = {
+ .vni = { 0, vni[0], vni[1], vni[2] },
+ };
+ return ret.dword;
+}
+
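+/*
+ * For example, vni = {0x12, 0x34, 0x56} yields a dword whose bytes are
+ * 00:12:34:56 in wire order, i.e. 0x00123456 when read as big-endian.
+ */
+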
+/**
+ * Helper function to process RTE_FLOW_ITEM_TYPE_ETH entry in configuration
+ * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the MAC address fields
+ * in the encapsulation parameters structure. The item must be prevalidated;
+ * no validation checks are performed by this function.
+ *
+ * @param[in] spec
+ * RTE_FLOW_ITEM_TYPE_ETH entry specification.
+ * @param[in] mask
+ * RTE_FLOW_ITEM_TYPE_ETH entry mask.
+ * @param[out] encap
+ * Structure to fill the gathered MAC address data.
+ */
+static void
+flow_tcf_parse_vxlan_encap_eth(const struct rte_flow_item_eth *spec,
+ const struct rte_flow_item_eth *mask,
+ struct flow_tcf_vxlan_encap *encap)
+{
+ /* Item must be validated before. No redundant checks. */
+ assert(spec);
+ if (!mask || !memcmp(&mask->dst,
+ &rte_flow_item_eth_mask.dst,
+ sizeof(rte_flow_item_eth_mask.dst))) {
+ /*
+		 * Ethernet addresses are not supported by
+		 * tc as tunnel_key parameters. The destination
+		 * address is needed to form the encap packet
+		 * header and is retrieved by the kernel from
+		 * implicit sources (ARP table, etc.);
+		 * address masks are not supported at all.
+ */
+ encap->eth.dst = spec->dst;
+ encap->mask |= FLOW_TCF_ENCAP_ETH_DST;
+ }
+ if (!mask || !memcmp(&mask->src,
+ &rte_flow_item_eth_mask.src,
+ sizeof(rte_flow_item_eth_mask.src))) {
+ /*
+ * Ethernet addresses are not supported by
+		 * tc as tunnel_key parameters. The source
+		 * Ethernet address is ignored anyway.
+ */
+ encap->eth.src = spec->src;
+ encap->mask |= FLOW_TCF_ENCAP_ETH_SRC;
+ }
+}
+
+/**
+ * Helper function to process RTE_FLOW_ITEM_TYPE_IPV4 entry in configuration
+ * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the IPV4 address fields
+ * in the encapsulation parameters structure. The item must be prevalidated;
+ * no validation checks are performed by this function.
+ *
+ * @param[in] spec
+ * RTE_FLOW_ITEM_TYPE_IPV4 entry specification.
+ * @param[in] mask
+ * RTE_FLOW_ITEM_TYPE_IPV4 entry mask.
+ * @param[out] encap
+ * Structure to fill the gathered IPV4 address data.
+ */
+static void
+flow_tcf_parse_vxlan_encap_ipv4(const struct rte_flow_item_ipv4 *spec,
+ const struct rte_flow_item_ipv4 *mask,
+ struct flow_tcf_vxlan_encap *encap)
+{
+ /* Item must be validated before. No redundant checks. */
+ assert(spec);
+ encap->ipv4.dst = spec->hdr.dst_addr;
+ encap->ipv4.src = spec->hdr.src_addr;
+ encap->mask |= FLOW_TCF_ENCAP_IPV4_SRC |
+ FLOW_TCF_ENCAP_IPV4_DST;
+ if (mask && mask->hdr.type_of_service) {
+ encap->mask |= FLOW_TCF_ENCAP_IP_TOS;
+ encap->ip_tos = spec->hdr.type_of_service;
+ }
+ if (mask && mask->hdr.time_to_live) {
+ encap->mask |= FLOW_TCF_ENCAP_IP_TTL;
+ encap->ip_ttl_hop = spec->hdr.time_to_live;
+ }
+}
+
+/**
+ * Helper function to process RTE_FLOW_ITEM_TYPE_IPV6 entry in configuration
+ * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the IPV6 address fields
+ * in the encapsulation parameters structure. The item must be prevalidated;
+ * no validation checks are performed by this function.
+ *
+ * @param[in] spec
+ * RTE_FLOW_ITEM_TYPE_IPV6 entry specification.
+ * @param[in] mask
+ * RTE_FLOW_ITEM_TYPE_IPV6 entry mask.
+ * @param[out] encap
+ * Structure to fill the gathered IPV6 address data.
+ */
+static void
+flow_tcf_parse_vxlan_encap_ipv6(const struct rte_flow_item_ipv6 *spec,
+ const struct rte_flow_item_ipv6 *mask,
+ struct flow_tcf_vxlan_encap *encap)
+{
+ /* Item must be validated before. No redundant checks. */
+ assert(spec);
+ memcpy(encap->ipv6.dst, spec->hdr.dst_addr, IPV6_ADDR_LEN);
+ memcpy(encap->ipv6.src, spec->hdr.src_addr, IPV6_ADDR_LEN);
+ encap->mask |= FLOW_TCF_ENCAP_IPV6_SRC |
+ FLOW_TCF_ENCAP_IPV6_DST;
+ if (mask) {
+ if ((rte_be_to_cpu_32(mask->hdr.vtc_flow) >>
+ RTE_IPV6_HDR_TC_SHIFT) & 0xff) {
+ encap->mask |= FLOW_TCF_ENCAP_IP_TOS;
+ encap->ip_tos = (rte_be_to_cpu_32
+ (spec->hdr.vtc_flow) >>
+ RTE_IPV6_HDR_TC_SHIFT) & 0xff;
+ }
+ if (mask->hdr.hop_limits) {
+ encap->mask |= FLOW_TCF_ENCAP_IP_TTL;
+ encap->ip_ttl_hop = spec->hdr.hop_limits;
+ }
+ }
+}
+
+/**
+ * Helper function to process RTE_FLOW_ITEM_TYPE_UDP entry in configuration
+ * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the UDP port fields
+ * in the encapsulation parameters structure. The item must be prevalidated;
+ * no validation checks are performed by this function.
+ *
+ * @param[in] spec
+ * RTE_FLOW_ITEM_TYPE_UDP entry specification.
+ * @param[in] mask
+ * RTE_FLOW_ITEM_TYPE_UDP entry mask.
+ * @param[out] encap
+ * Structure to fill the gathered UDP port data.
+ */
+static void
+flow_tcf_parse_vxlan_encap_udp(const struct rte_flow_item_udp *spec,
+ const struct rte_flow_item_udp *mask,
+ struct flow_tcf_vxlan_encap *encap)
+{
+ assert(spec);
+ encap->udp.dst = spec->hdr.dst_port;
+ encap->mask |= FLOW_TCF_ENCAP_UDP_DST;
+ if (!mask || mask->hdr.src_port != RTE_BE16(0x0000)) {
+ encap->udp.src = spec->hdr.src_port;
+		encap->mask |= FLOW_TCF_ENCAP_UDP_SRC;
+ }
+}
+
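+/*
+ * Note: the destination port is taken unconditionally, prevalidation
+ * guarantees the specification is present; the kernel needs this port to
+ * select the VXLAN device (IANA assigns 4789 to VXLAN, though any value
+ * may be used). The source port is optional.
+ */
+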
+/**
+ * Helper function to process RTE_FLOW_ITEM_TYPE_VXLAN entry in configuration
+ * of action RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. Fills the VNI fields
+ * in the encapsulation parameters structure. The item must be prevalidated;
+ * no validation checks are performed by this function.
+ *
+ * @param[in] spec
+ * RTE_FLOW_ITEM_TYPE_VXLAN entry specification.
+ * @param[out] encap
+ * Structure to fill the gathered VNI address data.
+ */
+static void
+flow_tcf_parse_vxlan_encap_vni(const struct rte_flow_item_vxlan *spec,
+ struct flow_tcf_vxlan_encap *encap)
+{
+	/* Item must be validated before. No redundant checks. */
+ assert(spec);
+ memcpy(encap->vxlan.vni, spec->vni, sizeof(encap->vxlan.vni));
+ encap->mask |= FLOW_TCF_ENCAP_VXLAN_VNI;
+}
+
+/**
+ * Populate consolidated encapsulation object from list of pattern items.
+ *
+ * Helper function to process the configuration of actions such as
+ * RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP. The item list must already be
+ * validated; there is no way to return a meaningful error.
+ *
+ * @param[in] action
+ * RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP action object.
+ * List of pattern items to gather data from.
+ * @param[out] encap
+ * Structure to fill gathered data.
+ */
+static void
+flow_tcf_vxlan_encap_parse(const struct rte_flow_action *action,
+ struct flow_tcf_vxlan_encap *encap)
+{
+ union {
+ const struct rte_flow_item_eth *eth;
+ const struct rte_flow_item_ipv4 *ipv4;
+ const struct rte_flow_item_ipv6 *ipv6;
+ const struct rte_flow_item_udp *udp;
+ const struct rte_flow_item_vxlan *vxlan;
+ } spec, mask;
+ const struct rte_flow_item *items;
+
+ assert(action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP);
+ assert(action->conf);
+
+ items = ((const struct rte_flow_action_vxlan_encap *)
+ action->conf)->definition;
+ assert(items);
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ mask.eth = items->mask;
+ spec.eth = items->spec;
+ flow_tcf_parse_vxlan_encap_eth(spec.eth, mask.eth,
+ encap);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ spec.ipv4 = items->spec;
+ mask.ipv4 = items->mask;
+ flow_tcf_parse_vxlan_encap_ipv4(spec.ipv4, mask.ipv4,
+ encap);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ spec.ipv6 = items->spec;
+ mask.ipv6 = items->mask;
+ flow_tcf_parse_vxlan_encap_ipv6(spec.ipv6, mask.ipv6,
+ encap);
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ mask.udp = items->mask;
+ spec.udp = items->spec;
+ flow_tcf_parse_vxlan_encap_udp(spec.udp, mask.udp,
+ encap);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ spec.vxlan = items->spec;
+ flow_tcf_parse_vxlan_encap_vni(spec.vxlan, encap);
+ break;
+ default:
+ assert(false);
+ DRV_LOG(WARNING,
+ "unsupported item %p type %d,"
+ " items must be validated"
+ " before flow creation",
+ (const void *)items, items->type);
+ encap->mask = 0;
+ return;
+ }
+ }
+}
+
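+/*
+ * Illustrative example (all values are hypothetical): an encap action
+ * whose "definition" list is
+ *
+ *   eth dst is 66:77:88:99:aa:bb / ipv4 src is 1.1.1.1 dst is 2.2.2.2 /
+ *   udp dst is 4789 / vxlan vni is 100 / end
+ *
+ * makes the parser above set FLOW_TCF_ENCAP_ETH_DST,
+ * FLOW_TCF_ENCAP_IPV4_SRC/DST, FLOW_TCF_ENCAP_UDP_DST and
+ * FLOW_TCF_ENCAP_VXLAN_VNI in encap->mask and copy the respective values
+ * into the flow_tcf_vxlan_encap structure.
+ */
+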
+/**
+ * Translate flow for Linux TC flower and construct Netlink message.
+ *
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
+ * @param[in, out] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_tcf_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ union {
+ const struct rte_flow_item_port_id *port_id;
+ const struct rte_flow_item_eth *eth;
+ const struct rte_flow_item_vlan *vlan;
+ const struct rte_flow_item_ipv4 *ipv4;
+ const struct rte_flow_item_ipv6 *ipv6;
+ const struct rte_flow_item_tcp *tcp;
+ const struct rte_flow_item_udp *udp;
+ const struct rte_flow_item_vxlan *vxlan;
+ } spec, mask;
+ union {
+ const struct rte_flow_action_port_id *port_id;
+ const struct rte_flow_action_jump *jump;
+ const struct rte_flow_action_of_push_vlan *of_push_vlan;
+ const struct rte_flow_action_of_set_vlan_vid *
+ of_set_vlan_vid;
+ const struct rte_flow_action_of_set_vlan_pcp *
+ of_set_vlan_pcp;
+ } conf;
+ union {
+ struct flow_tcf_tunnel_hdr *hdr;
+ struct flow_tcf_vxlan_decap *vxlan;
+ } decap = {
+ .hdr = NULL,
+ };
+ union {
+ struct flow_tcf_tunnel_hdr *hdr;
+ struct flow_tcf_vxlan_encap *vxlan;
+ } encap = {
+ .hdr = NULL,
+ };
+ struct flow_tcf_ptoi ptoi[PTOI_TABLE_SZ_MAX(dev)];
+ struct nlmsghdr *nlh = dev_flow->tcf.nlh;
+ struct tcmsg *tcm = dev_flow->tcf.tcm;
+ uint32_t na_act_index_cur;
+ rte_be16_t inner_etype = RTE_BE16(ETH_P_ALL);
+ rte_be16_t outer_etype = RTE_BE16(ETH_P_ALL);
+ rte_be16_t vlan_etype = RTE_BE16(ETH_P_ALL);
+	bool ip_proto_set = false;
+	bool tunnel_outer = false;
+ struct nlattr *na_flower;
+ struct nlattr *na_flower_act;
+ struct nlattr *na_vlan_id = NULL;
+ struct nlattr *na_vlan_priority = NULL;
+ uint64_t item_flags = 0;
+ int ret;
+
+ claim_nonzero(flow_tcf_build_ptoi_table(dev, ptoi,
+ PTOI_TABLE_SZ_MAX(dev)));
+ if (dev_flow->tcf.tunnel) {
+ switch (dev_flow->tcf.tunnel->type) {
+ case FLOW_TCF_TUNACT_VXLAN_DECAP:
+ decap.vxlan = dev_flow->tcf.vxlan_decap;
+ tunnel_outer = 1;
+ break;
+ case FLOW_TCF_TUNACT_VXLAN_ENCAP:
+ encap.vxlan = dev_flow->tcf.vxlan_encap;
+ break;
+ /* New tunnel actions can be added here. */
+ default:
+ assert(false);
+ break;
+ }
+ }
+ nlh = dev_flow->tcf.nlh;
+ tcm = dev_flow->tcf.tcm;
+ /* Prepare API must have been called beforehand. */
+ assert(nlh != NULL && tcm != NULL);
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm_ifindex = ptoi[0].ifindex;
+ tcm->tcm_parent = TC_H_MAKE(TC_H_INGRESS, TC_H_MIN_INGRESS);
+ /*
+ * Priority cannot be zero to prevent the kernel from picking one
+ * automatically.
+ */
+ tcm->tcm_info = TC_H_MAKE((attr->priority + 1) << 16, outer_etype);
+ if (attr->group > 0)
+ mnl_attr_put_u32(nlh, TCA_CHAIN, attr->group);
+ mnl_attr_put_strz(nlh, TCA_KIND, "flower");
+ na_flower = mnl_attr_nest_start(nlh, TCA_OPTIONS);
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ unsigned int i;
+
+ switch (items->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ mask.port_id = flow_tcf_item_mask
+ (items, &rte_flow_item_port_id_mask,
+ &flow_tcf_mask_supported.port_id,
+ &flow_tcf_mask_empty.port_id,
+ sizeof(flow_tcf_mask_supported.port_id),
+ error);
+ assert(mask.port_id);
+ if (mask.port_id == &flow_tcf_mask_empty.port_id)
+ break;
+ spec.port_id = items->spec;
+ if (!mask.port_id->id)
+ i = 0;
+ else
+ for (i = 0; ptoi[i].ifindex; ++i)
+ if (ptoi[i].port_id == spec.port_id->id)
+ break;
+ assert(ptoi[i].ifindex);
+ tcm->tcm_ifindex = ptoi[i].ifindex;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ mask.eth = flow_tcf_item_mask
+ (items, &rte_flow_item_eth_mask,
+ &flow_tcf_mask_supported.eth,
+ &flow_tcf_mask_empty.eth,
+ sizeof(flow_tcf_mask_supported.eth),
+ error);
+ assert(mask.eth);
+ if (mask.eth == &flow_tcf_mask_empty.eth)
+ break;
+ spec.eth = items->spec;
+ if (mask.eth->type) {
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
+ inner_etype = spec.eth->type;
+ else
+ outer_etype = spec.eth->type;
+ }
+ if (tunnel_outer) {
+ DRV_LOG(WARNING,
+ "outer L2 addresses cannot be"
+ " forced is outer ones for tunnel,"
+ " parameter is ignored");
+ break;
+ }
+ if (!rte_is_zero_ether_addr(&mask.eth->dst)) {
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST,
+ RTE_ETHER_ADDR_LEN,
+ spec.eth->dst.addr_bytes);
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_DST_MASK,
+ RTE_ETHER_ADDR_LEN,
+ mask.eth->dst.addr_bytes);
+ }
+ if (!rte_is_zero_ether_addr(&mask.eth->src)) {
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC,
+ RTE_ETHER_ADDR_LEN,
+ spec.eth->src.addr_bytes);
+ mnl_attr_put(nlh, TCA_FLOWER_KEY_ETH_SRC_MASK,
+ RTE_ETHER_ADDR_LEN,
+ mask.eth->src.addr_bytes);
+ }
+ assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ assert(!encap.hdr);
+ assert(!decap.hdr);
+ assert(!tunnel_outer);
+ item_flags |= MLX5_FLOW_LAYER_OUTER_VLAN;
+ mask.vlan = flow_tcf_item_mask
+ (items, &rte_flow_item_vlan_mask,
+ &flow_tcf_mask_supported.vlan,
+ &flow_tcf_mask_empty.vlan,
+ sizeof(flow_tcf_mask_supported.vlan),
+ error);
+ assert(mask.vlan);
+ if (mask.vlan == &flow_tcf_mask_empty.vlan)
+ break;
+ spec.vlan = items->spec;
+ assert(outer_etype == RTE_BE16(ETH_P_ALL) ||
+ outer_etype == RTE_BE16(ETH_P_8021Q));
+ outer_etype = RTE_BE16(ETH_P_8021Q);
+ if (mask.vlan->inner_type)
+ vlan_etype = spec.vlan->inner_type;
+ if (mask.vlan->tci & RTE_BE16(0xe000))
+ mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_VLAN_PRIO,
+ (rte_be_to_cpu_16
+ (spec.vlan->tci) >> 13) & 0x7);
+ if (mask.vlan->tci & RTE_BE16(0x0fff))
+ mnl_attr_put_u16(nlh, TCA_FLOWER_KEY_VLAN_ID,
+ rte_be_to_cpu_16
+ (spec.vlan->tci &
+ RTE_BE16(0x0fff)));
+ assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ mask.ipv4 = flow_tcf_item_mask
+ (items, &rte_flow_item_ipv4_mask,
+ &flow_tcf_mask_supported.ipv4,
+ &flow_tcf_mask_empty.ipv4,
+ sizeof(flow_tcf_mask_supported.ipv4),
+ error);
+ assert(mask.ipv4);
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
+ assert(inner_etype == RTE_BE16(ETH_P_ALL) ||
+ inner_etype == RTE_BE16(ETH_P_IP));
+ inner_etype = RTE_BE16(ETH_P_IP);
+ } else if (outer_etype == RTE_BE16(ETH_P_8021Q)) {
+ assert(vlan_etype == RTE_BE16(ETH_P_ALL) ||
+ vlan_etype == RTE_BE16(ETH_P_IP));
+ vlan_etype = RTE_BE16(ETH_P_IP);
+ } else {
+ assert(outer_etype == RTE_BE16(ETH_P_ALL) ||
+ outer_etype == RTE_BE16(ETH_P_IP));
+ outer_etype = RTE_BE16(ETH_P_IP);
+ }
+ spec.ipv4 = items->spec;
+ if (!tunnel_outer && mask.ipv4->hdr.next_proto_id) {
+ /*
+ * No way to set IP protocol for outer tunnel
+ * layers. Usually it is fixed, for example,
+ * to UDP for VXLAN/GPE.
+ */
+ assert(spec.ipv4); /* Mask is not empty. */
+ mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,
+ spec.ipv4->hdr.next_proto_id);
+ ip_proto_set = 1;
+ }
+ if (mask.ipv4 == &flow_tcf_mask_empty.ipv4 ||
+ (!mask.ipv4->hdr.src_addr &&
+ !mask.ipv4->hdr.dst_addr)) {
+ if (!tunnel_outer)
+ break;
+ /*
+				 * For the tunnel outer part we must set the
+				 * outer IP key anyway, even if the
+				 * specification/mask is empty. There is no
+				 * other way to tell the kernel about the
+				 * outer layer protocol.
+ */
+ mnl_attr_put_u32
+ (nlh, TCA_FLOWER_KEY_ENC_IPV4_SRC,
+ mask.ipv4->hdr.src_addr);
+ mnl_attr_put_u32
+ (nlh, TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK,
+ mask.ipv4->hdr.src_addr);
+ assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
+ break;
+ }
+ if (mask.ipv4->hdr.src_addr) {
+ mnl_attr_put_u32
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IPV4_SRC :
+ TCA_FLOWER_KEY_IPV4_SRC,
+ spec.ipv4->hdr.src_addr);
+ mnl_attr_put_u32
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IPV4_SRC_MASK :
+ TCA_FLOWER_KEY_IPV4_SRC_MASK,
+ mask.ipv4->hdr.src_addr);
+ }
+ if (mask.ipv4->hdr.dst_addr) {
+ mnl_attr_put_u32
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IPV4_DST :
+ TCA_FLOWER_KEY_IPV4_DST,
+ spec.ipv4->hdr.dst_addr);
+ mnl_attr_put_u32
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IPV4_DST_MASK :
+ TCA_FLOWER_KEY_IPV4_DST_MASK,
+ mask.ipv4->hdr.dst_addr);
+ }
+ if (mask.ipv4->hdr.time_to_live) {
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TTL :
+ TCA_FLOWER_KEY_IP_TTL,
+ spec.ipv4->hdr.time_to_live);
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TTL_MASK :
+ TCA_FLOWER_KEY_IP_TTL_MASK,
+ mask.ipv4->hdr.time_to_live);
+ }
+ if (mask.ipv4->hdr.type_of_service) {
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TOS :
+ TCA_FLOWER_KEY_IP_TOS,
+ spec.ipv4->hdr.type_of_service);
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TOS_MASK :
+ TCA_FLOWER_KEY_IP_TOS_MASK,
+ mask.ipv4->hdr.type_of_service);
+ }
+ assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6: {
+ bool ipv6_src, ipv6_dst;
+ uint8_t msk6, tos6;
+
+ item_flags |= (item_flags & MLX5_FLOW_LAYER_TUNNEL) ?
+ MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ mask.ipv6 = flow_tcf_item_mask
+ (items, &rte_flow_item_ipv6_mask,
+ &flow_tcf_mask_supported.ipv6,
+ &flow_tcf_mask_empty.ipv6,
+ sizeof(flow_tcf_mask_supported.ipv6),
+ error);
+ assert(mask.ipv6);
+ if (item_flags & MLX5_FLOW_LAYER_TUNNEL) {
+ assert(inner_etype == RTE_BE16(ETH_P_ALL) ||
+ inner_etype == RTE_BE16(ETH_P_IPV6));
+ inner_etype = RTE_BE16(ETH_P_IPV6);
+ } else if (outer_etype == RTE_BE16(ETH_P_8021Q)) {
+ assert(vlan_etype == RTE_BE16(ETH_P_ALL) ||
+ vlan_etype == RTE_BE16(ETH_P_IPV6));
+ vlan_etype = RTE_BE16(ETH_P_IPV6);
+ } else {
+ assert(outer_etype == RTE_BE16(ETH_P_ALL) ||
+ outer_etype == RTE_BE16(ETH_P_IPV6));
+ outer_etype = RTE_BE16(ETH_P_IPV6);
+ }
+ spec.ipv6 = items->spec;
+ if (!tunnel_outer && mask.ipv6->hdr.proto) {
+ /*
+ * No way to set IP protocol for outer tunnel
+ * layers. Usually it is fixed, for example,
+ * to UDP for VXLAN/GPE.
+ */
+ assert(spec.ipv6); /* Mask is not empty. */
+ mnl_attr_put_u8(nlh, TCA_FLOWER_KEY_IP_PROTO,
+ spec.ipv6->hdr.proto);
+ ip_proto_set = 1;
+ }
+ ipv6_dst = !IN6_IS_ADDR_UNSPECIFIED
+ (mask.ipv6->hdr.dst_addr);
+ ipv6_src = !IN6_IS_ADDR_UNSPECIFIED
+ (mask.ipv6->hdr.src_addr);
+ if (mask.ipv6 == &flow_tcf_mask_empty.ipv6 ||
+ (!ipv6_dst && !ipv6_src)) {
+ if (!tunnel_outer)
+ break;
+ /*
+				 * For the tunnel outer part we must set the
+				 * outer IP key anyway, even if the
+				 * specification/mask is empty. There is no
+				 * other way to tell the kernel about the
+				 * outer layer protocol.
+ */
+ mnl_attr_put(nlh,
+ TCA_FLOWER_KEY_ENC_IPV6_SRC,
+ IPV6_ADDR_LEN,
+ mask.ipv6->hdr.src_addr);
+ mnl_attr_put(nlh,
+ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK,
+ IPV6_ADDR_LEN,
+ mask.ipv6->hdr.src_addr);
+ assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);
+ break;
+ }
+ if (ipv6_src) {
+ mnl_attr_put(nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IPV6_SRC :
+ TCA_FLOWER_KEY_IPV6_SRC,
+ IPV6_ADDR_LEN,
+ spec.ipv6->hdr.src_addr);
+ mnl_attr_put(nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IPV6_SRC_MASK :
+ TCA_FLOWER_KEY_IPV6_SRC_MASK,
+ IPV6_ADDR_LEN,
+ mask.ipv6->hdr.src_addr);
+ }
+ if (ipv6_dst) {
+ mnl_attr_put(nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IPV6_DST :
+ TCA_FLOWER_KEY_IPV6_DST,
+ IPV6_ADDR_LEN,
+ spec.ipv6->hdr.dst_addr);
+ mnl_attr_put(nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IPV6_DST_MASK :
+ TCA_FLOWER_KEY_IPV6_DST_MASK,
+ IPV6_ADDR_LEN,
+ mask.ipv6->hdr.dst_addr);
+ }
+ if (mask.ipv6->hdr.hop_limits) {
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TTL :
+ TCA_FLOWER_KEY_IP_TTL,
+ spec.ipv6->hdr.hop_limits);
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TTL_MASK :
+ TCA_FLOWER_KEY_IP_TTL_MASK,
+ mask.ipv6->hdr.hop_limits);
+ }
+ msk6 = (rte_be_to_cpu_32(mask.ipv6->hdr.vtc_flow) >>
+ RTE_IPV6_HDR_TC_SHIFT) & 0xff;
+ if (msk6) {
+ tos6 = (rte_be_to_cpu_32
+ (spec.ipv6->hdr.vtc_flow) >>
+ RTE_IPV6_HDR_TC_SHIFT) & 0xff;
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TOS :
+ TCA_FLOWER_KEY_IP_TOS, tos6);
+ mnl_attr_put_u8
+ (nlh, tunnel_outer ?
+ TCA_FLOWER_KEY_ENC_IP_TOS_MASK :
+ TCA_FLOWER_KEY_IP_TOS_MASK, msk6);
+ }
+ assert(dev_flow->tcf.nlsize >= nlh->nlmsg_len);