priv_special_flow_disable_all(priv);
priv_mac_addrs_disable(priv);
priv_destroy_hash_rxqs(priv);
-
+ priv_flow_flush(priv, &priv->flows);
/* Prevent crashes when queues are still in use. */
dev->rx_pkt_burst = removed_rx_burst;
dev->tx_pkt_burst = removed_tx_burst;
priv->dev = eth_dev;
eth_dev->dev_ops = &mlx5_dev_ops;
TAILQ_INIT(&priv->flows);
+ TAILQ_INIT(&priv->ctrl_flows);
/* Hint libmlx5 to use PMD allocator for data plane resources */
struct mlx5dv_ctx_allocators alctr = {
#include <limits.h>
#include <net/if.h>
#include <netinet/in.h>
+#include <sys/queue.h>
/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
uint64_t base[MLX5_MAX_XSTATS];
};
+/* Flow list. */
+TAILQ_HEAD(mlx5_flows, rte_flow);
+
struct priv {
struct rte_eth_dev *dev; /* Ethernet device of master process. */
struct ibv_context *ctx; /* Verbs context. */
/* Device properties. */
uint16_t mtu; /* Configured MTU. */
uint8_t port; /* Physical port number. */
- unsigned int promisc_req:1; /* Promiscuous mode requested. */
unsigned int allmulti_req:1; /* All multicast mode requested. */
unsigned int hw_csum:1; /* Checksum offload is supported. */
unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
unsigned int (*reta_idx)[]; /* RETA index table. */
unsigned int reta_idx_n; /* RETA index size. */
struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
- TAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
+ struct mlx5_flows flows; /* RTE Flow rules. */
+ struct mlx5_flows ctrl_flows; /* Control flow rules. */
LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
struct rte_flow_error *);
int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
struct rte_flow_error *);
+void priv_flow_flush(struct priv *, struct mlx5_flows *);
int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
-int priv_flow_start(struct priv *);
-void priv_flow_stop(struct priv *);
+int priv_flow_start(struct priv *, struct mlx5_flows *);
+void priv_flow_stop(struct priv *, struct mlx5_flows *);
int priv_flow_verify(struct priv *);
+int mlx5_ctrl_flow(struct rte_eth_dev *, struct rte_flow_item_eth *,
+ struct rte_flow_item_eth *, unsigned int);
/* mlx5_socket.c */
#include "mlx5.h"
#include "mlx5_prm.h"
+/* Define minimal priority for control plane flows. */
+#define MLX5_CTRL_FLOW_PRIORITY 4
+
static int
mlx5_flow_create_eth(const struct rte_flow_item *item,
const void *default_mask,
"groups are not supported");
return -rte_errno;
}
- if (attr->priority) {
+ if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
NULL,
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
* @param[in] attr
* Flow rule attributes.
* @param[in] pattern
*/
static struct rte_flow *
priv_flow_create(struct priv *priv,
+ struct mlx5_flows *list,
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
rte_flow = priv_flow_create_action_queue(priv, &flow, error);
if (!rte_flow)
goto exit;
+ if (rte_flow) {
+ TAILQ_INSERT_TAIL(list, rte_flow, next);
+ DEBUG("Flow created %p", (void *)rte_flow);
+ }
return rte_flow;
exit:
rte_free(flow.ibv_attr);
struct rte_flow *flow;
priv_lock(priv);
- flow = priv_flow_create(priv, attr, items, actions, error);
- if (flow) {
- TAILQ_INSERT_TAIL(&priv->flows, flow, next);
- DEBUG("Flow created %p", (void *)flow);
- }
+ flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
+ error);
priv_unlock(priv);
return flow;
}
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
* @param[in] flow
* Flow to destroy.
*/
static void
priv_flow_destroy(struct priv *priv,
+ struct mlx5_flows *list,
struct rte_flow *flow)
{
unsigned int i;
* To remove the mark from the queue, the queue must not be
* present in any other marked flow (RSS or not).
*/
- TAILQ_FOREACH(tmp, &priv->flows, next) {
+ TAILQ_FOREACH(tmp, list, next) {
unsigned int j;
if (!tmp->mark)
claim_zero(ibv_destroy_flow(flow->ibv_flow));
if (!flow->drop)
mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
- TAILQ_REMOVE(&priv->flows, flow, next);
+ TAILQ_REMOVE(list, flow, next);
rte_free(flow->ibv_attr);
DEBUG("Flow destroyed %p", (void *)flow);
rte_free(flow);
(void)error;
priv_lock(priv);
- priv_flow_destroy(priv, flow);
+ priv_flow_destroy(priv, &priv->flows, flow);
priv_unlock(priv);
return 0;
}
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*/
-static void
-priv_flow_flush(struct priv *priv)
+void
+priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
{
+	/* priv_flow_destroy() TAILQ_REMOVEs the head on every pass, so this
+	 * loop terminates once the given list is fully drained. */
-	while (!TAILQ_EMPTY(&priv->flows)) {
+	while (!TAILQ_EMPTY(list)) {
		struct rte_flow *flow;

-		flow = TAILQ_FIRST(&priv->flows);
-		priv_flow_destroy(priv, flow);
+		flow = TAILQ_FIRST(list);
+		priv_flow_destroy(priv, list, flow);
	}
}
(void)error;
priv_lock(priv);
- priv_flow_flush(priv);
+ priv_flow_flush(priv, &priv->flows);
priv_unlock(priv);
return 0;
}
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*/
void
-priv_flow_stop(struct priv *priv)
+priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
{
struct rte_flow *flow;
- TAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {
+ TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
claim_zero(ibv_destroy_flow(flow->ibv_flow));
flow->ibv_flow = NULL;
mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
*
* @param priv
* Pointer to private structure.
+ * @param list
+ * Pointer to a TAILQ flow list.
*
* @return
* 0 on success, a errno value otherwise and rte_errno is set.
*/
int
-priv_flow_start(struct priv *priv)
+priv_flow_start(struct priv *priv, struct mlx5_flows *list)
{
int ret;
struct rte_flow *flow;
ret = priv_flow_create_drop_queue(priv);
if (ret)
return -1;
- TAILQ_FOREACH(flow, &priv->flows, next) {
+ TAILQ_FOREACH(flow, list, next) {
if (flow->frxq.hrxq)
goto flow_create;
flow->frxq.hrxq =
}
return ret;
}
+
+/**
+ * Enable/disable a control flow configured from the control plane.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param spec
+ * An Ethernet flow spec to apply.
+ * @param mask
+ * An Ethernet flow mask to apply.
+ * @param enable
+ * Enable/disable the flow.
+ *
+ * @return
+ * 0 on success, 1 if the control flow could not be created.
+ */
+int
+mlx5_ctrl_flow(struct rte_eth_dev *dev,
+ struct rte_flow_item_eth *spec,
+ struct rte_flow_item_eth *mask,
+ unsigned int enable)
+{
+ struct priv *priv = dev->data->dev_private;
+ /* Control flows use the reserved lowest-precedence priority level. */
+ const struct rte_flow_attr attr = {
+ .ingress = 1,
+ .priority = MLX5_CTRL_FLOW_PRIORITY,
+ };
+ struct rte_flow_item items[] = {
+ {
+ .type = RTE_FLOW_ITEM_TYPE_ETH,
+ .spec = spec,
+ .last = NULL,
+ .mask = mask,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_END,
+ },
+ };
+ /* Matched traffic is steered to queue 0. */
+ struct rte_flow_action actions[] = {
+ {
+ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
+ .conf = &(struct rte_flow_action_queue){
+ .index = 0,
+ },
+ },
+ {
+ .type = RTE_FLOW_ACTION_TYPE_END,
+ },
+ };
+ struct rte_flow *flow;
+ struct rte_flow_error error;
+
+ if (enable) {
+ flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items,
+ actions, &error);
+ /* NOTE(review): the detailed 'error' is discarded here; callers
+ * only see the 0/1 result. */
+ if (!flow)
+ return 1;
+ } else {
+ /* NOTE(review): this local 'spec' shadows the 'spec' parameter.
+ * items[] above was already initialized from the parameter, so
+ * behavior is correct, but a rename would aid readability. */
+ struct spec {
+ struct ibv_flow_attr ibv_attr;
+ struct ibv_flow_spec_eth eth;
+ } spec;
+ struct mlx5_flow_parse parser = {
+ .ibv_attr = &spec.ibv_attr,
+ .offset = sizeof(struct ibv_flow_attr),
+ };
+ struct ibv_flow_spec_eth *eth;
+ const unsigned int attr_size = sizeof(struct ibv_flow_attr);
+
+ /* Re-translate items[0] into a Verbs Ethernet spec, then scan
+ * ctrl_flows for a byte-identical entry to tear down. */
+ claim_zero(mlx5_flow_create_eth(&items[0], NULL, &parser));
+ TAILQ_FOREACH(flow, &priv->ctrl_flows, next) {
+ eth = (void *)((uintptr_t)flow->ibv_attr + attr_size);
+ assert(eth->type == IBV_FLOW_SPEC_ETH);
+ if (!memcmp(eth, &spec.eth, sizeof(*eth)))
+ break;
+ }
+ /* Absent flow (already disabled) is not an error. */
+ if (flow) {
+ claim_zero(ibv_destroy_flow(flow->ibv_flow));
+ mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
+ rte_free(flow->ibv_attr);
+ DEBUG("Control flow destroyed %p", (void *)flow);
+ TAILQ_REMOVE(&priv->ctrl_flows, flow, next);
+ rte_free(flow);
+ }
+ }
+ return 0;
+}
/* Initialization data for special flows. */
static const struct special_flow_init special_flow_init[] = {
- [HASH_RXQ_FLOW_TYPE_PROMISC] = {
- .dst_mac_val = "\x00\x00\x00\x00\x00\x00",
- .dst_mac_mask = "\x00\x00\x00\x00\x00\x00",
- .hash_types =
- 1 << HASH_RXQ_TCPV4 |
- 1 << HASH_RXQ_UDPV4 |
- 1 << HASH_RXQ_IPV4 |
- 1 << HASH_RXQ_TCPV6 |
- 1 << HASH_RXQ_UDPV6 |
- 1 << HASH_RXQ_IPV6 |
- 1 << HASH_RXQ_ETH |
- 0,
- .per_vlan = 0,
- },
[HASH_RXQ_FLOW_TYPE_ALLMULTI] = {
.dst_mac_val = "\x01\x00\x00\x00\x00\x00",
.dst_mac_mask = "\x01\x00\x00\x00\x00\x00",
if (priv->isolated)
return 0;
- for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
+ for (flow_type = HASH_RXQ_FLOW_TYPE_ALLMULTI;
flow_type != HASH_RXQ_FLOW_TYPE_MAC;
++flow_type) {
int ret;
{
enum hash_rxq_flow_type flow_type;
- for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
+ for (flow_type = HASH_RXQ_FLOW_TYPE_ALLMULTI;
flow_type != HASH_RXQ_FLOW_TYPE_MAC;
++flow_type)
priv_special_flow_disable(priv, flow_type);
void
mlx5_promiscuous_enable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int ret;
+ struct rte_flow_item_eth eth = {
+ .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .type = 0,
+ };
if (mlx5_is_secondary())
return;
-
- priv_lock(priv);
- priv->promisc_req = 1;
- ret = priv_rehash_flows(priv);
- if (ret)
- ERROR("error while enabling promiscuous mode: %s",
- strerror(ret));
- priv_unlock(priv);
+ dev->data->promiscuous = 1;
+ claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 1));
}
/**
void
mlx5_promiscuous_disable(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- int ret;
+ struct rte_flow_item_eth eth = {
+ .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+ .type = 0,
+ };
if (mlx5_is_secondary())
return;
-
- priv_lock(priv);
- priv->promisc_req = 0;
- ret = priv_rehash_flows(priv);
- if (ret)
- ERROR("error while disabling promiscuous mode: %s",
- strerror(ret));
- priv_unlock(priv);
+ dev->data->promiscuous = 0;
+ claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 0));
}
/**
int
priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
{
- /* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
- * has been requested. */
- if (priv->promisc_req)
- return type == HASH_RXQ_FLOW_TYPE_PROMISC;
switch (type) {
- case HASH_RXQ_FLOW_TYPE_PROMISC:
- return !!priv->promisc_req;
case HASH_RXQ_FLOW_TYPE_ALLMULTI:
return !!priv->allmulti_req;
case HASH_RXQ_FLOW_TYPE_BROADCAST:
};
enum hash_rxq_flow_type {
- HASH_RXQ_FLOW_TYPE_PROMISC,
HASH_RXQ_FLOW_TYPE_ALLMULTI,
HASH_RXQ_FLOW_TYPE_BROADCAST,
HASH_RXQ_FLOW_TYPE_IPV6MULTI,
hash_rxq_flow_type_str(enum hash_rxq_flow_type flow_type)
{
switch (flow_type) {
- case HASH_RXQ_FLOW_TYPE_PROMISC:
- return "promiscuous";
case HASH_RXQ_FLOW_TYPE_ALLMULTI:
return "allmulticast";
case HASH_RXQ_FLOW_TYPE_BROADCAST:
(void *)priv, strerror(err));
goto error;
}
- err = priv_flow_start(priv);
+ if (dev->data->promiscuous)
+ mlx5_promiscuous_enable(dev);
+ err = priv_flow_start(priv, &priv->ctrl_flows);
+ if (err) {
+ ERROR("%p: an error occurred while configuring control flows:"
+ " %s",
+ (void *)priv, strerror(err));
+ goto error;
+ }
+ err = priv_flow_start(priv, &priv->flows);
if (err) {
ERROR("%p: an error occurred while configuring flows:"
" %s",
priv_special_flow_disable_all(priv);
priv_mac_addrs_disable(priv);
priv_destroy_hash_rxqs(priv);
- priv_flow_stop(priv);
+ priv_flow_stop(priv, &priv->flows);
+ priv_flow_flush(priv, &priv->ctrl_flows);
priv_rxq_stop(priv);
priv_txq_stop(priv);
priv_unlock(priv);
priv_special_flow_disable_all(priv);
priv_mac_addrs_disable(priv);
priv_destroy_hash_rxqs(priv);
- priv_flow_stop(priv);
+ priv_flow_stop(priv, &priv->flows);
+ priv_flow_flush(priv, &priv->ctrl_flows);
priv_rx_intr_vec_disable(priv);
+ priv_dev_interrupt_handler_uninstall(priv, dev);
priv_txq_stop(priv);
priv_rxq_stop(priv);
LIST_FOREACH(mr, &priv->mr, next) {
priv_mr_release(priv, mr);
}
- priv_dev_interrupt_handler_uninstall(priv, dev);
priv_unlock(priv);
}