#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_ether.h>
+#include <rte_flow.h>
#include <rte_interrupts.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
mlx4_dev_configure(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
int ret;
/* Prepare internal flow rules. */
- ret = mlx4_flow_sync(priv);
- if (ret)
- ERROR("cannot set up internal flow rules: %s",
- strerror(-ret));
+ ret = mlx4_flow_sync(priv, &error);
+ if (ret) {
+ ERROR("cannot set up internal flow rules (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ -ret, strerror(-ret), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+ }
return ret;
}
mlx4_dev_start(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
int ret;
if (priv->started)
(void *)dev);
goto err;
}
- ret = mlx4_flow_start(priv);
+ ret = mlx4_flow_sync(priv, &error);
if (ret) {
- ERROR("%p: flow start failed: %s",
- (void *)dev, strerror(ret));
+ ERROR("%p: cannot attach flow rules (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ (void *)dev,
+ -ret, strerror(-ret), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
goto err;
}
return 0;
return;
DEBUG("%p: detaching flows from all RX queues", (void *)dev);
priv->started = 0;
- mlx4_flow_stop(priv);
+ mlx4_flow_sync(priv, NULL);
mlx4_intr_uninstall(priv);
}
if (!!enable == !!priv->isolated)
return 0;
priv->isolated = !!enable;
- if (mlx4_flow_sync(priv)) {
+ if (mlx4_flow_sync(priv, error)) {
priv->isolated = !enable;
- return rte_flow_error_set(error, rte_errno,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL,
- enable ?
- "cannot enter isolated mode" :
- "cannot leave isolated mode");
+ return -rte_errno;
}
return 0;
}
*
* @param priv
* Pointer to private structure.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
int
-mlx4_flow_sync(struct priv *priv)
+mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
{
struct rte_flow *flow;
int ret;
for (flow = LIST_FIRST(&priv->flows);
flow && flow->internal;
flow = LIST_FIRST(&priv->flows))
- claim_zero(mlx4_flow_destroy(priv->dev, flow, NULL));
+ claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
} else if (!LIST_FIRST(&priv->flows) ||
!LIST_FIRST(&priv->flows)->internal) {
/*
* If the first rule is not internal outside isolated mode,
* they must be added back.
*/
- ret = mlx4_flow_internal(priv, NULL);
+ ret = mlx4_flow_internal(priv, error);
+ if (ret)
+ return ret;
+ }
+ /* Toggle the remaining flow rules. */
+ for (flow = LIST_FIRST(&priv->flows);
+ flow;
+ flow = LIST_NEXT(flow, next)) {
+ ret = mlx4_flow_toggle(priv, flow, priv->started, error);
if (ret)
return ret;
}
- if (priv->started)
- return mlx4_flow_start(priv);
- mlx4_flow_stop(priv);
+ if (!priv->started)
+ assert(!priv->drop);
return 0;
}
mlx4_flow_destroy(priv->dev, flow, NULL);
}
-/**
- * Disable flow rules.
- *
- * @param priv
- * Pointer to private structure.
- */
-void
-mlx4_flow_stop(struct priv *priv)
-{
- struct rte_flow *flow;
-
- for (flow = LIST_FIRST(&priv->flows);
- flow;
- flow = LIST_NEXT(flow, next)) {
- claim_zero(mlx4_flow_toggle(priv, flow, 0, NULL));
- }
- assert(!priv->drop);
-}
-
-/**
- * Enable flow rules.
- *
- * @param priv
- * Pointer to private structure.
- *
- * @return
- * 0 on success, a negative errno value otherwise and rte_errno is set.
- */
-int
-mlx4_flow_start(struct priv *priv)
-{
- int ret;
- struct rte_flow *flow;
-
- for (flow = LIST_FIRST(&priv->flows);
- flow;
- flow = LIST_NEXT(flow, next)) {
- ret = mlx4_flow_toggle(priv, flow, 1, NULL);
- if (unlikely(ret)) {
- mlx4_flow_stop(priv);
- return ret;
- }
- }
- return 0;
-}
-
static const struct rte_flow_ops mlx4_flow_ops = {
.validate = mlx4_flow_validate,
.create = mlx4_flow_create,
/* mlx4_flow.c */
-int mlx4_flow_sync(struct priv *priv);
+int mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error);
void mlx4_flow_clean(struct priv *priv);
-int mlx4_flow_start(struct priv *priv);
-void mlx4_flow_stop(struct priv *priv);
int mlx4_filter_ctrl(struct rte_eth_dev *dev,
enum rte_filter_type filter_type,
enum rte_filter_op filter_op,
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
+#include <rte_flow.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>
}
dev->data->rx_queues[idx] = NULL;
/* Disable associated flows. */
- mlx4_flow_sync(priv);
+ mlx4_flow_sync(priv, NULL);
mlx4_rxq_cleanup(rxq);
} else {
rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket);
if (ret) {
rte_free(rxq);
} else {
+ struct rte_flow_error error;
+
rxq->stats.idx = idx;
DEBUG("%p: adding Rx queue %p to list",
(void *)dev, (void *)rxq);
dev->data->rx_queues[idx] = rxq;
/* Re-enable associated flows. */
- ret = mlx4_flow_sync(priv);
+ ret = mlx4_flow_sync(priv, &error);
if (ret) {
+ ERROR("cannot re-attach flow rules to queue %u"
+ " (code %d, \"%s\"), flow error type %d,"
+ " cause %p, message: %s", idx,
+ -ret, strerror(-ret), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
dev->data->rx_queues[idx] = NULL;
mlx4_rxq_cleanup(rxq);
rte_free(rxq);
priv->dev->data->rx_queues[i] = NULL;
break;
}
- mlx4_flow_sync(priv);
+ mlx4_flow_sync(priv, NULL);
mlx4_rxq_cleanup(rxq);
rte_free(rxq);
}