/* Key of thread specific flow workspace data. */
static rte_thread_key key_workspace;
+int
+mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_esp *mask = item->mask;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret;
+
+ if (!(item_flags & l3m))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 is mandatory to filter on L4");
+ if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L4 layers not supported");
+ if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with ESP layer");
+ if (!mask)
+ mask = &rte_flow_item_esp_mask;
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_esp_mask,
+ sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+ error);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
int
mlx5_flow_os_init_workspace_once(void)
{
{
return mlx5_glue->dr_sync_domain(domain, flags);
}
+
+/**
+ * Validate ESP item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error);
+
#endif /* RTE_PMD_MLX5_FLOW_OS_H_ */
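For orientation, here is a minimal, hypothetical application-side sketch of the kind of pattern this validator ends up checking. The helper name build_esp_pattern_example(), the SPI value and the surrounding ETH/IPV4 items are illustrative assumptions, not part of this patch:

#include <rte_flow.h>
#include <rte_byteorder.h>

/* Hypothetical example: validate an ingress rule that matches ESP packets
 * by SPI. The ESP item reaches the driver's item-validation loop, which
 * dispatches to mlx5_flow_os_validate_item_esp(). */
static int
build_esp_pattern_example(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_item_esp esp_spec = {
		.hdr.spi = RTE_BE32(0x1234),	/* example SPI, illustrative */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* L3 must precede ESP */
		{ .type = RTE_FLOW_ITEM_TYPE_ESP,
		  .spec = &esp_spec,
		  .mask = &rte_flow_item_esp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_attr attr = { .ingress = 1 };

	return rte_flow_validate(port_id, &attr, pattern, actions, error);
}

With the default rte_flow_item_esp_mask the SPI field is fully masked, so validating this pattern should exercise exactly the per-OS SPI handling this patch introduces.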
return 0;
}
-/**
- * Validate ESP item.
- *
- * @param[in] item
- * Item specification.
- * @param[in] item_flags
- * Bit-fields that holds the items detected until now.
- * @param[in] target_protocol
- * The next protocol in the previous item.
- * @param[out] error
- * Pointer to error structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx5_flow_validate_item_esp(const struct rte_flow_item *item,
- uint64_t item_flags,
- uint8_t target_protocol,
- struct rte_flow_error *error)
-{
- const struct rte_flow_item_esp *mask = item->mask;
- const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
- const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
- MLX5_FLOW_LAYER_OUTER_L3;
- const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
- MLX5_FLOW_LAYER_OUTER_L4;
- int ret;
-
- if (!(item_flags & l3m))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "L3 is mandatory to filter on L4");
- if (item_flags & l4m)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "multiple L4 layers not supported");
- if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "protocol filtering not compatible"
- " with ESP layer");
- if (!mask)
- mask = &rte_flow_item_esp_mask;
- ret = mlx5_flow_item_acceptable
- (item, (const uint8_t *)mask,
- (const uint8_t *)&rte_flow_item_esp_mask,
- sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
- error);
- if (ret < 0)
- return ret;
- return 0;
-}
-
/**
* Validate UDP item.
*
uint8_t target_protocol,
const struct rte_flow_item_tcp *flow_mask,
struct rte_flow_error *error);
-int mlx5_flow_validate_item_esp(const struct rte_flow_item *item,
- uint64_t item_flags,
- uint8_t target_protocol,
- struct rte_flow_error *error);
int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
uint64_t item_flags,
uint8_t target_protocol,
case RTE_FLOW_ITEM_TYPE_VOID:
break;
case RTE_FLOW_ITEM_TYPE_ESP:
- ret = mlx5_flow_validate_item_esp(items, item_flags,
+ ret = mlx5_flow_os_validate_item_esp(items, item_flags,
next_protocol,
error);
if (ret < 0)
rte_errno = old_err;
return err;
}
+
+int
+mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_item_esp *mask = item->mask;
+ const struct rte_flow_item_esp *spec = item->spec;
+ const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
+ MLX5_FLOW_LAYER_OUTER_L3;
+ const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
+ MLX5_FLOW_LAYER_OUTER_L4;
+ int ret;
+
+ if (!(item_flags & l3m))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "L3 is mandatory to filter on L4");
+ if (item_flags & l4m)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "multiple L4 layers not supported");
+ if (target_protocol != 0xff && target_protocol != IPPROTO_ESP)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "protocol filtering not compatible"
+ " with ESP layer");
+ if (!mask)
+ mask = &rte_flow_item_esp_mask;
+ if (spec && (spec->hdr.spi & mask->hdr.spi))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "matching on spi field in esp is not"
+ " supported on Windows");
+ ret = mlx5_flow_item_acceptable
+ (item, (const uint8_t *)mask,
+ (const uint8_t *)&rte_flow_item_esp_mask,
+ sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
+ error);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
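A hedged usage note on the check above: any non-zero SPI bits under the mask are rejected on Windows, so an application that only needs to classify on ESP presence can omit spec and mask entirely, as in this hypothetical snippet (not part of this patch):

/* Hypothetical usage sketch: match any ESP packet, no SPI. With spec and
 * mask left NULL the spec check above is skipped, so this item is expected
 * to validate on Windows as well as Linux. */
static const struct rte_flow_item esp_presence_only = {
	.type = RTE_FLOW_ITEM_TYPE_ESP,
	.spec = NULL,
	.last = NULL,
	.mask = NULL,
};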
case RTE_FLOW_ITEM_TYPE_TCP:
case RTE_FLOW_ITEM_TYPE_IPV6:
case RTE_FLOW_ITEM_TYPE_VLAN:
+ case RTE_FLOW_ITEM_TYPE_ESP:
return true;
default:
return false;
size_t num_actions,
void *actions[], void **flow);
int mlx5_flow_os_destroy_flow(void *drv_flow_ptr);
+
+/**
+ * Validate ESP item.
+ *
+ * @param[in] item
+ * Item specification.
+ * @param[in] item_flags
+ * Bit-fields that hold the items detected until now.
+ * @param[in] target_protocol
+ * The next protocol in the previous item.
+ * @param[out] error
+ * Pointer to error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
+ uint64_t item_flags,
+ uint8_t target_protocol,
+ struct rte_flow_error *error);
+
#endif /* RTE_PMD_MLX5_FLOW_OS_H_ */
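Finally, a small hypothetical probe illustrating the per-OS difference. It reuses build_esp_pattern_example() from the earlier sketch (an illustrative helper, not a DPDK or mlx5 API) and is expected to succeed on Linux, while the Windows path added here should report the "matching on spi field in esp is not supported on Windows" error:

#include <stdio.h>
#include <rte_flow.h>

/* Hypothetical probe: report whether the port accepts ESP SPI matching. */
static void
probe_esp_spi_support(uint16_t port_id)
{
	struct rte_flow_error error = { 0 };

	if (build_esp_pattern_example(port_id, &error) == 0)
		printf("port %u: ESP SPI matching accepted\n", port_id);
	else
		printf("port %u: ESP SPI matching rejected: %s\n", port_id,
		       error.message ? error.message : "(no message)");
}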