struct rte_flow_desc_data {
const char *name;
size_t size;
+ /* Optional per-type conversion callback for items/actions carrying
+ * out-of-band data (e.g. FLEX pattern bytes): copies that data into
+ * dst when dst is non-NULL and returns the extra size required.
+ */
+ size_t (*desc_fn)(void *dst, const void *src);
};
+/**
+ * Copy an item/action configuration structure, including any out-of-band
+ * data attached to it via a per-type conversion callback.
+ *
+ * @param buf
+ * Destination memory.
+ * @param data
+ * Source memory
+ * @param size
+ * Requested copy size
+ * @param desc
+ * rte_flow_desc_item - for flow item conversion.
+ * rte_flow_desc_action - for flow action conversion.
+ * @param type
+ * Offset into the desc param or negative value for private flow elements.
+ * @return
+ * Number of bytes needed for a full conversion, regardless of the
+ * requested @p size; 0 when @p buf or @p data is NULL.
+ */
+static inline size_t
+rte_flow_conv_copy(void *buf, const void *data, const size_t size,
+ const struct rte_flow_desc_data *desc, int type)
+{
+ /**
+ * Allow PMD private flow item
+ */
+ bool rte_type = type >= 0;
+
+ /* Private (negative) types carry an opaque pointer-sized payload. */
+ size_t sz = rte_type ? desc[type].size : sizeof(void *);
+ if (buf == NULL || data == NULL)
+ return 0;
+ rte_memcpy(buf, data, (size > sz ? sz : size));
+ if (rte_type && desc[type].desc_fn)
+ /* Pass a NULL destination on a pure size query (size == 0). */
+ sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
+ return sz;
+}
+
+/* Conversion callback for RTE_FLOW_ITEM_TYPE_FLEX: the flex item carries a
+ * variable-length pattern; copy it right after the fixed-size struct and
+ * return the extra length needed. With a NULL buf this is a size-only query.
+ */
+static size_t
+rte_flow_item_flex_conv(void *buf, const void *data)
+{
+ struct rte_flow_item_flex *dst = buf;
+ const struct rte_flow_item_flex *src = data;
+ if (buf) {
+ /* Pattern bytes land immediately after the copied struct;
+ * repoint the copy's pattern field at them.
+ */
+ dst->pattern = rte_memcpy
+ ((void *)((uintptr_t)(dst + 1)), src->pattern,
+ src->length);
+ }
+ return src->length;
+}
+
/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
[RTE_FLOW_ITEM_TYPE_ ## t] = { \
.name = # t, \
- .size = s, \
+ .size = s, \
+ .desc_fn = NULL,\
+ }
+
+/** Generate flow_item[] entry with an out-of-band conversion function. */
+#define MK_FLOW_ITEM_FN(t, s, fn) \
+ [RTE_FLOW_ITEM_TYPE_ ## t] = {\
+ .name = # t, \
+ .size = s, \
+ .desc_fn = fn, \
}
/** Information about known flow pattern items. */
MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
+ MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
+ MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
+ rte_flow_item_flex_conv),
+ MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
+ MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
};
/** Generate flow_action[] entry. */
[RTE_FLOW_ACTION_TYPE_ ## t] = { \
.name = # t, \
.size = s, \
+ .desc_fn = NULL,\
}
+/** Generate flow_action[] entry with an out-of-band conversion function
+ * (size 0: the whole payload is produced by the callback).
+ */
+#define MK_FLOW_ACTION_FN(t, fn) \
+ [RTE_FLOW_ACTION_TYPE_ ## t] = { \
+ .name = # t, \
+ .size = 0, \
+ .desc_fn = fn,\
+ }
+
+
/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
MK_FLOW_ACTION(END, 0),
if (flag < 0)
goto error;
rte_flow_dynf_metadata_offs = offset;
- rte_flow_dynf_metadata_mask = (1ULL << flag);
+ rte_flow_dynf_metadata_mask = RTE_BIT64(flag);
return 0;
error:
rte_flow_dynf_metadata_offs = -1;
- rte_flow_dynf_metadata_mask = 0ULL;
+ rte_flow_dynf_metadata_mask = UINT64_C(0);
return -rte_errno;
}
}
break;
default:
- /**
- * allow PMD private flow item
- */
- off = (int)item->type >= 0 ?
- rte_flow_desc_item[item->type].size : sizeof(void *);
- rte_memcpy(buf, data, (size > off ? off : size));
+ off = rte_flow_conv_copy(buf, data, size,
+ rte_flow_desc_item, item->type);
break;
}
return off;
}
break;
default:
- /**
- * allow PMD private flow action
- */
- off = (int)action->type >= 0 ?
- rte_flow_desc_action[action->type].size : sizeof(void *);
- rte_memcpy(buf, action->conf, (size > off ? off : size));
+ off = rte_flow_conv_copy(buf, action->conf, size,
+ rte_flow_desc_action, action->type);
break;
}
return off;
ops->pick_transfer_proxy(dev, proxy_port_id, error),
error);
}
+
+/* Create a flex item through the driver's flex_item_create op.
+ * Returns NULL on failure with the error struct populated
+ * (ENOTSUP when the driver does not implement the op).
+ */
+struct rte_flow_item_flex_handle *
+rte_flow_flex_item_create(uint16_t port_id,
+ const struct rte_flow_item_flex_conf *conf,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ struct rte_flow_item_flex_handle *handle;
+
+ if (unlikely(!ops))
+ return NULL;
+ if (unlikely(!ops->flex_item_create)) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+ return NULL;
+ }
+ handle = ops->flex_item_create(dev, conf, error);
+ if (handle == NULL)
+ /* Driver signalled failure via rte_errno; fill the error struct. */
+ flow_err(port_id, -rte_errno, error);
+ return handle;
+}
+
+/* Release a flex item handle through the driver's flex_item_release op;
+ * the driver's return code is forwarded through flow_err().
+ */
+int
+rte_flow_flex_item_release(uint16_t port_id,
+ const struct rte_flow_item_flex_handle *handle,
+ struct rte_flow_error *error)
+{
+ int ret;
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (unlikely(!ops || !ops->flex_item_release))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+ ret = ops->flex_item_release(dev, handle, error);
+ return flow_err(port_id, ret, error);
+}
+
+/* Query flow engine resource information (port/queue capabilities).
+ * The ethdev must already be configured.
+ * NOTE(review): the early -EINVAL exits below log but do not populate
+ * *error, unlike the ENOTSUP path - confirm this asymmetry is intended.
+ */
+int
+rte_flow_info_get(uint16_t port_id,
+ struct rte_flow_port_info *port_info,
+ struct rte_flow_queue_info *queue_info,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (unlikely(!ops))
+ return -rte_errno;
+ if (dev->data->dev_configured == 0) {
+ RTE_FLOW_LOG(INFO,
+ "Device with port_id=%"PRIu16" is not configured.\n",
+ port_id);
+ return -EINVAL;
+ }
+ if (port_info == NULL) {
+ RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
+ return -EINVAL;
+ }
+ if (likely(!!ops->info_get)) {
+ return flow_err(port_id,
+ ops->info_get(dev, port_info, queue_info, error),
+ error);
+ }
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+}
+
+/* Pre-allocate flow engine resources. Only valid after the device is
+ * configured and before it is started; marks flow_configured on success
+ * so the template/async APIs below become usable.
+ */
+int
+rte_flow_configure(uint16_t port_id,
+ const struct rte_flow_port_attr *port_attr,
+ uint16_t nb_queue,
+ const struct rte_flow_queue_attr *queue_attr[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ int ret;
+
+ if (unlikely(!ops))
+ return -rte_errno;
+ if (dev->data->dev_configured == 0) {
+ RTE_FLOW_LOG(INFO,
+ "Device with port_id=%"PRIu16" is not configured.\n",
+ port_id);
+ return -EINVAL;
+ }
+ if (dev->data->dev_started != 0) {
+ RTE_FLOW_LOG(INFO,
+ "Device with port_id=%"PRIu16" already started.\n",
+ port_id);
+ return -EINVAL;
+ }
+ if (port_attr == NULL) {
+ RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
+ return -EINVAL;
+ }
+ if (queue_attr == NULL) {
+ RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
+ return -EINVAL;
+ }
+ if (likely(!!ops->configure)) {
+ ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
+ if (ret == 0)
+ /* Gate for the template/async flow APIs. */
+ dev->data->flow_configured = 1;
+ return flow_err(port_id, ret, error);
+ }
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+}
+
+/* Create a pattern (item) template for table-based flow insertion.
+ * Requires rte_flow_configure() to have succeeded first; returns NULL on
+ * failure with the error struct populated.
+ */
+struct rte_flow_pattern_template *
+rte_flow_pattern_template_create(uint16_t port_id,
+ const struct rte_flow_pattern_template_attr *template_attr,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ struct rte_flow_pattern_template *template;
+
+ if (unlikely(!ops))
+ return NULL;
+ if (dev->data->flow_configured == 0) {
+ RTE_FLOW_LOG(INFO,
+ "Flow engine on port_id=%"PRIu16" is not configured.\n",
+ port_id);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_STATE,
+ NULL, rte_strerror(EINVAL));
+ return NULL;
+ }
+ if (template_attr == NULL) {
+ RTE_FLOW_LOG(ERR,
+ "Port %"PRIu16" template attr is NULL.\n",
+ port_id);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, rte_strerror(EINVAL));
+ return NULL;
+ }
+ if (pattern == NULL) {
+ RTE_FLOW_LOG(ERR,
+ "Port %"PRIu16" pattern is NULL.\n",
+ port_id);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, rte_strerror(EINVAL));
+ return NULL;
+ }
+ if (likely(!!ops->pattern_template_create)) {
+ template = ops->pattern_template_create(dev, template_attr,
+ pattern, error);
+ if (template == NULL)
+ /* Driver set rte_errno; convert into the error struct. */
+ flow_err(port_id, -rte_errno, error);
+ return template;
+ }
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+ return NULL;
+}
+
+/* Destroy a pattern template. A NULL template is a no-op returning 0. */
+int
+rte_flow_pattern_template_destroy(uint16_t port_id,
+ struct rte_flow_pattern_template *pattern_template,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (unlikely(!ops))
+ return -rte_errno;
+ if (unlikely(pattern_template == NULL))
+ return 0;
+ if (likely(!!ops->pattern_template_destroy)) {
+ return flow_err(port_id,
+ ops->pattern_template_destroy(dev,
+ pattern_template,
+ error),
+ error);
+ }
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+}
+
+/* Create an actions template for table-based flow insertion.
+ * Requires rte_flow_configure() to have succeeded first; returns NULL on
+ * failure with the error struct populated.
+ */
+struct rte_flow_actions_template *
+rte_flow_actions_template_create(uint16_t port_id,
+ const struct rte_flow_actions_template_attr *template_attr,
+ const struct rte_flow_action actions[],
+ const struct rte_flow_action masks[],
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ struct rte_flow_actions_template *template;
+
+ if (unlikely(!ops))
+ return NULL;
+ if (dev->data->flow_configured == 0) {
+ RTE_FLOW_LOG(INFO,
+ "Flow engine on port_id=%"PRIu16" is not configured.\n",
+ port_id);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_STATE,
+ NULL, rte_strerror(EINVAL));
+ return NULL;
+ }
+ if (template_attr == NULL) {
+ RTE_FLOW_LOG(ERR,
+ "Port %"PRIu16" template attr is NULL.\n",
+ port_id);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, rte_strerror(EINVAL));
+ return NULL;
+ }
+ if (actions == NULL) {
+ RTE_FLOW_LOG(ERR,
+ "Port %"PRIu16" actions is NULL.\n",
+ port_id);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, rte_strerror(EINVAL));
+ return NULL;
+ }
+ if (masks == NULL) {
+ RTE_FLOW_LOG(ERR,
+ "Port %"PRIu16" masks is NULL.\n",
+ port_id);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, rte_strerror(EINVAL));
+ /* Fix: previously fell through and invoked the driver with
+ * NULL masks after reporting the error; bail out like the
+ * other sanity checks.
+ */
+ return NULL;
+ }
+ if (likely(!!ops->actions_template_create)) {
+ template = ops->actions_template_create(dev, template_attr,
+ actions, masks, error);
+ if (template == NULL)
+ /* Driver set rte_errno; convert into the error struct. */
+ flow_err(port_id, -rte_errno, error);
+ return template;
+ }
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+ return NULL;
+}
+
+/* Destroy an actions template. A NULL template is a no-op returning 0. */
+int
+rte_flow_actions_template_destroy(uint16_t port_id,
+ struct rte_flow_actions_template *actions_template,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (unlikely(!ops))
+ return -rte_errno;
+ if (unlikely(actions_template == NULL))
+ return 0;
+ if (likely(!!ops->actions_template_destroy)) {
+ return flow_err(port_id,
+ ops->actions_template_destroy(dev,
+ actions_template,
+ error),
+ error);
+ }
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+}
+
+/* Create a template table combining previously created pattern and actions
+ * templates. Requires rte_flow_configure() to have succeeded; returns NULL
+ * on failure with the error struct populated.
+ */
+struct rte_flow_template_table *
+rte_flow_template_table_create(uint16_t port_id,
+ const struct rte_flow_template_table_attr *table_attr,
+ struct rte_flow_pattern_template *pattern_templates[],
+ uint8_t nb_pattern_templates,
+ struct rte_flow_actions_template *actions_templates[],
+ uint8_t nb_actions_templates,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ struct rte_flow_template_table *table;
+
+ if (unlikely(!ops))
+ return NULL;
+ if (dev->data->flow_configured == 0) {
+ RTE_FLOW_LOG(INFO,
+ "Flow engine on port_id=%"PRIu16" is not configured.\n",
+ port_id);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_STATE,
+ NULL, rte_strerror(EINVAL));
+ return NULL;
+ }
+ if (table_attr == NULL) {
+ RTE_FLOW_LOG(ERR,
+ "Port %"PRIu16" table attr is NULL.\n",
+ port_id);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, rte_strerror(EINVAL));
+ return NULL;
+ }
+ if (pattern_templates == NULL) {
+ RTE_FLOW_LOG(ERR,
+ "Port %"PRIu16" pattern templates is NULL.\n",
+ port_id);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, rte_strerror(EINVAL));
+ return NULL;
+ }
+ if (actions_templates == NULL) {
+ RTE_FLOW_LOG(ERR,
+ "Port %"PRIu16" actions templates is NULL.\n",
+ port_id);
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR,
+ NULL, rte_strerror(EINVAL));
+ return NULL;
+ }
+ if (likely(!!ops->template_table_create)) {
+ table = ops->template_table_create(dev, table_attr,
+ pattern_templates, nb_pattern_templates,
+ actions_templates, nb_actions_templates,
+ error);
+ if (table == NULL)
+ /* Driver set rte_errno; convert into the error struct. */
+ flow_err(port_id, -rte_errno, error);
+ return table;
+ }
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+ return NULL;
+}
+
+/* Destroy a template table. A NULL table is a no-op returning 0. */
+int
+rte_flow_template_table_destroy(uint16_t port_id,
+ struct rte_flow_template_table *template_table,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ if (unlikely(!ops))
+ return -rte_errno;
+ if (unlikely(template_table == NULL))
+ return 0;
+ if (likely(!!ops->template_table_destroy)) {
+ return flow_err(port_id,
+ ops->template_table_destroy(dev,
+ template_table,
+ error),
+ error);
+ }
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, rte_strerror(ENOTSUP));
+}
+
+/* Enqueue an asynchronous flow-rule creation on the given flow queue.
+ * NOTE(review): unlike the synchronous API, ops and ops->async_create are
+ * dereferenced without NULL checks - presumably a deliberate fast-path
+ * trade-off for this datapath-adjacent call; confirm with the API contract.
+ */
+struct rte_flow *
+rte_flow_async_create(uint16_t port_id,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_template_table *template_table,
+ const struct rte_flow_item pattern[],
+ uint8_t pattern_template_index,
+ const struct rte_flow_action actions[],
+ uint8_t actions_template_index,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ struct rte_flow *flow;
+
+ flow = ops->async_create(dev, queue_id,
+ op_attr, template_table,
+ pattern, pattern_template_index,
+ actions, actions_template_index,
+ user_data, error);
+ if (flow == NULL)
+ /* Driver set rte_errno; convert into the error struct. */
+ flow_err(port_id, -rte_errno, error);
+ return flow;
+}
+
+/* Enqueue an asynchronous flow-rule destruction on the given flow queue.
+ * NOTE(review): ops is dereferenced unchecked (fast path) - see
+ * rte_flow_async_create above.
+ */
+int
+rte_flow_async_destroy(uint16_t port_id,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow *flow,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ return flow_err(port_id,
+ ops->async_destroy(dev, queue_id,
+ op_attr, flow,
+ user_data, error),
+ error);
+}
+
+/* Push all enqueued-but-postponed operations on a flow queue to hardware. */
+int
+rte_flow_push(uint16_t port_id,
+ uint32_t queue_id,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+
+ return flow_err(port_id,
+ ops->push(dev, queue_id, error),
+ error);
+}
+
+/* Poll a flow queue for up to n_res completed operation results.
+ * A non-zero driver return (result count or negative error) is passed
+ * through unchanged; flow_err() is only applied when ret == 0.
+ */
+int
+rte_flow_pull(uint16_t port_id,
+ uint32_t queue_id,
+ struct rte_flow_op_result res[],
+ uint16_t n_res,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ int ret;
+
+ ret = ops->pull(dev, queue_id, res, n_res, error);
+ return ret ? ret : flow_err(port_id, ret, error);
+}
+
+/* Enqueue asynchronous creation of an indirect action handle.
+ * Returns NULL on failure with the error struct populated.
+ */
+struct rte_flow_action_handle *
+rte_flow_async_action_handle_create(uint16_t port_id,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ const struct rte_flow_indir_action_conf *indir_action_conf,
+ const struct rte_flow_action *action,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ struct rte_flow_action_handle *handle;
+
+ handle = ops->async_action_handle_create(dev, queue_id, op_attr,
+ indir_action_conf, action, user_data, error);
+ if (handle == NULL)
+ /* Driver set rte_errno; convert into the error struct. */
+ flow_err(port_id, -rte_errno, error);
+ return handle;
+}
+
+/* Enqueue asynchronous destruction of an indirect action handle;
+ * the driver's return code is forwarded through flow_err().
+ */
+int
+rte_flow_async_action_handle_destroy(uint16_t port_id,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_handle *action_handle,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ int ret;
+
+ ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
+ action_handle, user_data, error);
+ return flow_err(port_id, ret, error);
+}
+
+/* Enqueue asynchronous update of an indirect action handle with new
+ * configuration; the driver's return code is forwarded through flow_err().
+ */
+int
+rte_flow_async_action_handle_update(uint16_t port_id,
+ uint32_t queue_id,
+ const struct rte_flow_op_attr *op_attr,
+ struct rte_flow_action_handle *action_handle,
+ const void *update,
+ void *user_data,
+ struct rte_flow_error *error)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+ const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+ int ret;
+
+ ret = ops->async_action_handle_update(dev, queue_id, op_attr,
+ action_handle, update, user_data, error);
+ return flow_err(port_id, ret, error);
+}