diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index cd04c446b5..534cd9338e 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1047,6 +1047,114 @@ mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/**
+ * Validate ICMP6 item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ *   The next protocol in the previous layer.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
+			      uint64_t item_flags,
+			      uint8_t target_protocol,
+			      struct rte_flow_error *error)
+{
+	const struct rte_flow_item_icmp6 *mask = item->mask;
+	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				      MLX5_FLOW_LAYER_OUTER_L4;
+	int ret;
+
+	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "protocol filtering not compatible"
+					  " with ICMP6 layer");
+	if (!(item_flags & l3m))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "IPv6 is mandatory to filter on"
+					  " ICMP6");
+	if (item_flags & l4m)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "multiple L4 layers not supported");
+	if (!mask)
+		mask = &rte_flow_item_icmp6_mask;
+	ret = mlx5_flow_item_acceptable
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&rte_flow_item_icmp6_mask,
+		 sizeof(struct rte_flow_item_icmp6), error);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
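The validator above only checks layer ordering and mask acceptability; it
does not program the NIC. As a minimal usage sketch (not part of this
patch), the following pattern passes the new checks: the IPv6 item supplies
the mandatory L3 layer and no other L4 item precedes ICMP6. The port_id,
queue index, and ICMPv6 type value (135, neighbor solicitation) are
assumptions for illustration.

#include <rte_flow.h>

static int
probe_icmp6_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Hypothetical spec: match ICMPv6 neighbor solicitation. */
	struct rte_flow_item_icmp6 icmp6_spec = { .type = 135 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV6 }, /* mandatory L3 layer */
		{ .type = RTE_FLOW_ITEM_TYPE_ICMP6,
		  .spec = &icmp6_spec,
		  .mask = &rte_flow_item_icmp6_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* 0 when the PMD accepts the rule; negative errno otherwise. */
	return rte_flow_validate(port_id, &attr, pattern, actions, &error);
}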
+/**
+ * Validate ICMP item.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ *   The next protocol in the previous layer.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
+			     uint64_t item_flags,
+			     uint8_t target_protocol,
+			     struct rte_flow_error *error)
+{
+	const struct rte_flow_item_icmp *mask = item->mask;
+	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+				      MLX5_FLOW_LAYER_OUTER_L4;
+	int ret;
+
+	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "protocol filtering not compatible"
+					  " with ICMP layer");
+	if (!(item_flags & l3m))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "IPv4 is mandatory to filter"
+					  " on ICMP");
+	if (item_flags & l4m)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  "multiple L4 layers not supported");
+	if (!mask)
+		mask = &rte_flow_item_icmp_mask;
+	ret = mlx5_flow_item_acceptable
+		(item, (const uint8_t *)mask,
+		 (const uint8_t *)&rte_flow_item_icmp_mask,
+		 sizeof(struct rte_flow_item_icmp), error);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
 /**
  * Validate Ethernet item.
  *
@@ -2091,6 +2199,10 @@ flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
 	else
 		flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
 	flow = rte_calloc(__func__, 1, flow_size, 0);
+	if (!flow) {
+		rte_errno = ENOMEM;
+		return NULL;
+	}
 	flow->drv_type = flow_get_drv_type(dev, attr);
 	flow->ingress = attr->ingress;
 	flow->transfer = attr->transfer;
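The flow_list_create() hunk closes a NULL-pointer dereference on
allocation failure: rte_calloc() can fail, and the function previously
used its result unconditionally. With the fix, creation fails cleanly
with rte_errno set to ENOMEM. A caller-side sketch through the public
API, assuming only that rte_flow_create() returns NULL on failure (the
rte_flow_error message may be left unset on this path, hence the
fallback string):

#include <stdio.h>
#include <rte_errno.h>
#include <rte_flow.h>

static struct rte_flow *
create_or_report(uint16_t port_id,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[])
{
	struct rte_flow_error error = { .message = NULL };
	struct rte_flow *flow;

	/* NULL return means failure; rte_errno carries the cause. */
	flow = rte_flow_create(port_id, attr, pattern, actions, &error);
	if (!flow)
		fprintf(stderr, "flow creation failed: %s (rte_errno %d)\n",
			error.message ? error.message : "no details",
			rte_errno);
	return flow;
}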