void *user_data,
struct rte_flow_error *error);
+Enqueue indirect action creation operation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Asynchronous version of the indirect action creation API.
+
+.. code-block:: c
+
+   struct rte_flow_action_handle *
+   rte_flow_async_action_handle_create(uint16_t port_id,
+                                       uint32_t queue_id,
+                                       const struct rte_flow_op_attr *q_ops_attr,
+                                       const struct rte_flow_indir_action_conf *indir_action_conf,
+                                       const struct rte_flow_action *action,
+                                       void *user_data,
+                                       struct rte_flow_error *error);
+
+A valid handle is returned in case of success. It must be destroyed later by
+calling ``rte_flow_async_action_handle_destroy()`` even if the action is rejected.
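+
+The creation is only enqueued on the given flow queue; its result is retrieved
+with ``rte_flow_pull()`` after the in-flight operations have been pushed with
+``rte_flow_push()``. The following sketch is illustrative only; it assumes a
+started port ``port_id`` with pre-configured flow queues, uses flow queue 0,
+and creates an indirect AGE action:
+
+.. code-block:: c
+
+   struct rte_flow_error error;
+   struct rte_flow_op_attr op_attr = { .postpone = 0 };
+   struct rte_flow_indir_action_conf conf = { .ingress = 1 };
+   struct rte_flow_action_age age = { .timeout = 10 };
+   struct rte_flow_action action = {
+       .type = RTE_FLOW_ACTION_TYPE_AGE,
+       .conf = &age,
+   };
+   struct rte_flow_op_result result;
+   struct rte_flow_action_handle *handle;
+
+   /* Enqueue the creation on flow queue 0 of an already configured port. */
+   handle = rte_flow_async_action_handle_create(port_id, 0, &op_attr,
+                                                &conf, &action,
+                                                NULL, &error);
+   if (handle == NULL)
+       return -rte_errno;
+
+   /* Push the enqueued operation to the NIC and poll for its completion. */
+   rte_flow_push(port_id, 0, &error);
+   while (rte_flow_pull(port_id, 0, &result, 1, &error) == 0)
+       ; /* busy-wait until the completion is available */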
+
+Enqueue indirect action destruction operation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Asynchronous version of the indirect action destruction API.
+
+.. code-block:: c
+
+   int
+   rte_flow_async_action_handle_destroy(uint16_t port_id,
+                                        uint32_t queue_id,
+                                        const struct rte_flow_op_attr *q_ops_attr,
+                                        struct rte_flow_action_handle *action_handle,
+                                        void *user_data,
+                                        struct rte_flow_error *error);
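+
+The destruction must be enqueued on the same flow queue the action was created
+on. Continuing the illustrative creation sketch above:
+
+.. code-block:: c
+
+   /* Enqueue the destruction of the handle on the creation queue (0). */
+   int ret = rte_flow_async_action_handle_destroy(port_id, 0, &op_attr,
+                                                  handle, NULL, &error);
+   if (ret == 0) {
+       rte_flow_push(port_id, 0, &error);
+       while (rte_flow_pull(port_id, 0, &result, 1, &error) == 0)
+           ;
+   }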
+
+Enqueue indirect action update operation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Asynchronous version of the indirect action update API.
+
+.. code-block:: c
+
+   int
+   rte_flow_async_action_handle_update(uint16_t port_id,
+                                       uint32_t queue_id,
+                                       const struct rte_flow_op_attr *q_ops_attr,
+                                       struct rte_flow_action_handle *action_handle,
+                                       const void *update,
+                                       void *user_data,
+                                       struct rte_flow_error *error);
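+
+The type of the ``update`` argument depends on the action type: it is either
+the same configuration type that was used to create the handle or an
+action-specific wrapper structure. The following sketch is illustrative only
+and assumes the PMD accepts a plain ``struct rte_flow_action_age`` with a new
+timeout as the update profile for the indirect AGE action created above:
+
+.. code-block:: c
+
+   /* New configuration assumed here to be a valid update profile. */
+   struct rte_flow_action_age new_age = { .timeout = 30 };
+
+   /* Enqueue the update, then push and poll for the result as before. */
+   if (rte_flow_async_action_handle_update(port_id, 0, &op_attr, handle,
+                                           &new_age, NULL, &error) == 0) {
+       rte_flow_push(port_id, 0, &error);
+       while (rte_flow_pull(port_id, 0, &result, 1, &error) == 0)
+           ;
+   }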
+
Push enqueued operations
~~~~~~~~~~~~~~~~~~~~~~~~
``rte_flow_pull`` to poll and retrieve results of these operations and
``rte_flow_push`` to push all the in-flight operations to the NIC.
+ * Added asynchronous API for indirect action management:
+ ``rte_flow_async_action_handle_create``,
+ ``rte_flow_async_action_handle_destroy`` and
+ ``rte_flow_async_action_handle_update``.
+
* **Added rte_flow support for matching GRE optional fields.**
Added ``gre_option`` item in rte_flow to support checksum/key/sequence
ret = ops->pull(dev, queue_id, res, n_res, error);
return ret ? ret : flow_err(port_id, ret, error);
}
+
+struct rte_flow_action_handle *
+rte_flow_async_action_handle_create(uint16_t port_id,
+                uint32_t queue_id,
+                const struct rte_flow_op_attr *op_attr,
+                const struct rte_flow_indir_action_conf *indir_action_conf,
+                const struct rte_flow_action *action,
+                void *user_data,
+                struct rte_flow_error *error)
+{
+        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+        struct rte_flow_action_handle *handle;
+
+        handle = ops->async_action_handle_create(dev, queue_id, op_attr,
+                        indir_action_conf, action, user_data, error);
+        if (handle == NULL)
+                flow_err(port_id, -rte_errno, error);
+        return handle;
+}
+
+int
+rte_flow_async_action_handle_destroy(uint16_t port_id,
+                uint32_t queue_id,
+                const struct rte_flow_op_attr *op_attr,
+                struct rte_flow_action_handle *action_handle,
+                void *user_data,
+                struct rte_flow_error *error)
+{
+        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+        int ret;
+
+        ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
+                        action_handle, user_data, error);
+        return flow_err(port_id, ret, error);
+}
+
+int
+rte_flow_async_action_handle_update(uint16_t port_id,
+                uint32_t queue_id,
+                const struct rte_flow_op_attr *op_attr,
+                struct rte_flow_action_handle *action_handle,
+                const void *update,
+                void *user_data,
+                struct rte_flow_error *error)
+{
+        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+        const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
+        int ret;
+
+        ret = ops->async_action_handle_update(dev, queue_id, op_attr,
+                        action_handle, update, user_data, error);
+        return flow_err(port_id, ret, error);
+}
uint16_t n_res,
struct rte_flow_error *error);
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue indirect action creation operation.
+ * @see rte_flow_action_handle_create
+ *
+ * @param[in] port_id
+ *   Port identifier of Ethernet device.
+ * @param[in] queue_id
+ *   Flow queue which is used to create the action.
+ * @param[in] op_attr
+ *   Indirect action creation operation attributes.
+ * @param[in] indir_action_conf
+ *   Action configuration for the indirect action object creation.
+ * @param[in] action
+ *   Specific configuration of the indirect action object.
+ * @param[in] user_data
+ *   The user data that will be returned on the completion events.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *   PMDs initialize this structure in case of error only.
+ *
+ * @return
+ *   A valid handle in case of success, NULL otherwise and rte_errno is set.
+ */
+__rte_experimental
+struct rte_flow_action_handle *
+rte_flow_async_action_handle_create(uint16_t port_id,
+                uint32_t queue_id,
+                const struct rte_flow_op_attr *op_attr,
+                const struct rte_flow_indir_action_conf *indir_action_conf,
+                const struct rte_flow_action *action,
+                void *user_data,
+                struct rte_flow_error *error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue indirect action destruction operation.
+ * The destruction queue must be the same
+ * as the queue on which the action was created.
+ *
+ * @param[in] port_id
+ *   Port identifier of Ethernet device.
+ * @param[in] queue_id
+ *   Flow queue which is used to destroy the action.
+ * @param[in] op_attr
+ *   Indirect action destruction operation attributes.
+ * @param[in] action_handle
+ *   Handle for the indirect action object to be destroyed.
+ * @param[in] user_data
+ *   The user data that will be returned on the completion events.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *   PMDs initialize this structure in case of error only.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+__rte_experimental
+int
+rte_flow_async_action_handle_destroy(uint16_t port_id,
+                uint32_t queue_id,
+                const struct rte_flow_op_attr *op_attr,
+                struct rte_flow_action_handle *action_handle,
+                void *user_data,
+                struct rte_flow_error *error);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Enqueue indirect action update operation.
+ * @see rte_flow_action_handle_create
+ *
+ * @param[in] port_id
+ *   Port identifier of Ethernet device.
+ * @param[in] queue_id
+ *   Flow queue which is used to update the action.
+ * @param[in] op_attr
+ *   Indirect action update operation attributes.
+ * @param[in] action_handle
+ *   Handle for the indirect action object to be updated.
+ * @param[in] update
+ *   Update profile specification used to modify the action pointed to by
+ *   *action_handle*. *update* is either of the same type as the immediate
+ *   action configuration used to create the handle, or a wrapper structure
+ *   that contains the action configuration to be updated and bit fields
+ *   indicating which members of that configuration to update.
+ * @param[in] user_data
+ *   The user data that will be returned on the completion events.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL.
+ *   PMDs initialize this structure in case of error only.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+__rte_experimental
+int
+rte_flow_async_action_handle_update(uint16_t port_id,
+                uint32_t queue_id,
+                const struct rte_flow_op_attr *op_attr,
+                struct rte_flow_action_handle *action_handle,
+                const void *update,
+                void *user_data,
+                struct rte_flow_error *error);
#ifdef __cplusplus
}
#endif
struct rte_flow_op_result res[],
uint16_t n_res,
struct rte_flow_error *error);
+        /** See rte_flow_async_action_handle_create() */
+        struct rte_flow_action_handle *(*async_action_handle_create)
+                (struct rte_eth_dev *dev,
+                 uint32_t queue_id,
+                 const struct rte_flow_op_attr *op_attr,
+                 const struct rte_flow_indir_action_conf *indir_action_conf,
+                 const struct rte_flow_action *action,
+                 void *user_data,
+                 struct rte_flow_error *err);
+        /** See rte_flow_async_action_handle_destroy() */
+        int (*async_action_handle_destroy)
+                (struct rte_eth_dev *dev,
+                 uint32_t queue_id,
+                 const struct rte_flow_op_attr *op_attr,
+                 struct rte_flow_action_handle *action_handle,
+                 void *user_data,
+                 struct rte_flow_error *error);
+        /** See rte_flow_async_action_handle_update() */
+        int (*async_action_handle_update)
+                (struct rte_eth_dev *dev,
+                 uint32_t queue_id,
+                 const struct rte_flow_op_attr *op_attr,
+                 struct rte_flow_action_handle *action_handle,
+                 const void *update,
+                 void *user_data,
+                 struct rte_flow_error *error);
};
/**
rte_flow_async_destroy;
rte_flow_push;
rte_flow_pull;
+ rte_flow_async_action_handle_create;
+ rte_flow_async_action_handle_destroy;
+ rte_flow_async_action_handle_update;
};
INTERNAL {