From 1b37f5d898349d70aaf1b8427694ca5b7a669d8f Mon Sep 17 00:00:00 2001
From: =?utf8?q?N=C3=A9lio=20Laranjeiro?=
Date: Mon, 9 Oct 2017 16:44:53 +0200
Subject: [PATCH] net/mlx5: use flow to enable promiscuous mode

RSS hash configuration is currently ignored by the PMD; this commit
removes the RSS feature from promiscuous mode. This functionality will
be added in a later commit.

Signed-off-by: Nelio Laranjeiro
Acked-by: Yongseok Koh
---
 drivers/net/mlx5/mlx5.c         |   3 +-
 drivers/net/mlx5/mlx5.h         |  15 +++-
 drivers/net/mlx5/mlx5_flow.c    | 141 +++++++++++++++++++++++++++-----
 drivers/net/mlx5/mlx5_rxmode.c  |  52 ++++--------
 drivers/net/mlx5/mlx5_rxq.c     |   6 --
 drivers/net/mlx5/mlx5_rxtx.h    |   3 -
 drivers/net/mlx5/mlx5_trigger.c |  19 ++++-
 7 files changed, 166 insertions(+), 73 deletions(-)

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index fd8138bf4f..97d6a21294 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -201,7 +201,7 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	priv_special_flow_disable_all(priv);
 	priv_mac_addrs_disable(priv);
 	priv_destroy_hash_rxqs(priv);
-
+	priv_flow_flush(priv, &priv->flows);
 	/* Prevent crashes when queues are still in use. */
 	dev->rx_pkt_burst = removed_rx_burst;
 	dev->tx_pkt_burst = removed_tx_burst;
@@ -884,6 +884,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		priv->dev = eth_dev;
 		eth_dev->dev_ops = &mlx5_dev_ops;
 		TAILQ_INIT(&priv->flows);
+		TAILQ_INIT(&priv->ctrl_flows);
 
 		/* Hint libmlx5 to use PMD allocator for data plane resources */
 		struct mlx5dv_ctx_allocators alctr = {
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 77413c9b79..2699917643 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -39,6 +39,7 @@
 #include
 #include
 #include
+#include
 
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
@@ -86,6 +87,9 @@ struct mlx5_xstats_ctrl {
 	uint64_t base[MLX5_MAX_XSTATS];
 };
 
+/* Flow list. */
+TAILQ_HEAD(mlx5_flows, rte_flow);
+
 struct priv {
 	struct rte_eth_dev *dev; /* Ethernet device of master process. */
 	struct ibv_context *ctx; /* Verbs context. */
@@ -104,7 +108,6 @@ struct priv {
 	/* Device properties. */
 	uint16_t mtu; /* Configured MTU. */
 	uint8_t port; /* Physical port number. */
-	unsigned int promisc_req:1; /* Promiscuous mode requested. */
 	unsigned int allmulti_req:1; /* All multicast mode requested. */
 	unsigned int hw_csum:1; /* Checksum offload is supported. */
 	unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
@@ -145,7 +148,8 @@ struct priv {
 	unsigned int (*reta_idx)[]; /* RETA index table. */
 	unsigned int reta_idx_n; /* RETA index size. */
 	struct mlx5_hrxq_drop *flow_drop_queue; /* Flow drop queue. */
-	TAILQ_HEAD(mlx5_flows, rte_flow) flows; /* RTE Flow rules. */
+	struct mlx5_flows flows; /* RTE Flow rules. */
+	struct mlx5_flows ctrl_flows; /* Control flow rules. */
 	LIST_HEAD(mr, mlx5_mr) mr; /* Memory region. */
 	LIST_HEAD(rxq, mlx5_rxq_ctrl) rxqsctrl; /* DPDK Rx queues. */
 	LIST_HEAD(rxqibv, mlx5_rxq_ibv) rxqsibv; /* Verbs Rx queues. */
@@ -293,11 +297,14 @@ struct rte_flow *mlx5_flow_create(struct rte_eth_dev *,
 				  struct rte_flow_error *);
 int mlx5_flow_destroy(struct rte_eth_dev *, struct rte_flow *,
 		      struct rte_flow_error *);
+void priv_flow_flush(struct priv *, struct mlx5_flows *);
 int mlx5_flow_flush(struct rte_eth_dev *, struct rte_flow_error *);
 int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
-int priv_flow_start(struct priv *);
-void priv_flow_stop(struct priv *);
+int priv_flow_start(struct priv *, struct mlx5_flows *);
+void priv_flow_stop(struct priv *, struct mlx5_flows *);
 int priv_flow_verify(struct priv *);
+int mlx5_ctrl_flow(struct rte_eth_dev *, struct rte_flow_item_eth *,
+		   struct rte_flow_item_eth *, unsigned int);
 
 /* mlx5_socket.c */
 
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 494888284a..8512905e3a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -52,6 +52,9 @@
 #include "mlx5.h"
 #include "mlx5_prm.h"
 
+/* Define minimal priority for control plane flows. */
+#define MLX5_CTRL_FLOW_PRIORITY 4
+
 static int
 mlx5_flow_create_eth(const struct rte_flow_item *item,
 		     const void *default_mask,
@@ -451,7 +454,7 @@ priv_flow_validate(struct priv *priv,
 				   "groups are not supported");
 		return -rte_errno;
 	}
-	if (attr->priority) {
+	if (attr->priority && attr->priority != MLX5_CTRL_FLOW_PRIORITY) {
 		rte_flow_error_set(error, ENOTSUP,
 				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
 				   NULL,
@@ -1169,6 +1172,8 @@ error:
  *
  * @param priv
  *   Pointer to private structure.
+ * @param list
+ *   Pointer to a TAILQ flow list.
  * @param[in] attr
  *   Flow rule attributes.
  * @param[in] pattern
@@ -1183,6 +1188,7 @@ error:
  */
 static struct rte_flow *
 priv_flow_create(struct priv *priv,
+		 struct mlx5_flows *list,
 		 const struct rte_flow_attr *attr,
 		 const struct rte_flow_item items[],
 		 const struct rte_flow_action actions[],
@@ -1232,6 +1238,10 @@ priv_flow_create(struct priv *priv,
 	rte_flow = priv_flow_create_action_queue(priv, &flow, error);
 	if (!rte_flow)
 		goto exit;
+	if (rte_flow) {
+		TAILQ_INSERT_TAIL(list, rte_flow, next);
+		DEBUG("Flow created %p", (void *)rte_flow);
+	}
 	return rte_flow;
 exit:
 	rte_free(flow.ibv_attr);
@@ -1255,11 +1265,8 @@ mlx5_flow_create(struct rte_eth_dev *dev,
 	struct rte_flow *flow;
 
 	priv_lock(priv);
-	flow = priv_flow_create(priv, attr, items, actions, error);
-	if (flow) {
-		TAILQ_INSERT_TAIL(&priv->flows, flow, next);
-		DEBUG("Flow created %p", (void *)flow);
-	}
+	flow = priv_flow_create(priv, &priv->flows, attr, items, actions,
+				error);
 	priv_unlock(priv);
 	return flow;
 }
@@ -1269,11 +1276,14 @@ mlx5_flow_create(struct rte_eth_dev *dev,
  *
  * @param priv
  *   Pointer to private structure.
+ * @param list
+ *   Pointer to a TAILQ flow list.
  * @param[in] flow
  *   Flow to destroy.
  */
 static void
 priv_flow_destroy(struct priv *priv,
+		  struct mlx5_flows *list,
 		  struct rte_flow *flow)
 {
 	unsigned int i;
@@ -1293,7 +1303,7 @@ priv_flow_destroy(struct priv *priv,
 		 * To remove the mark from the queue, the queue must not be
 		 * present in any other marked flow (RSS or not).
 		 */
-		TAILQ_FOREACH(tmp, &priv->flows, next) {
+		TAILQ_FOREACH(tmp, list, next) {
 			unsigned int j;
 
 			if (!tmp->mark)
@@ -1313,7 +1323,7 @@ free:
 	claim_zero(ibv_destroy_flow(flow->ibv_flow));
 	if (!flow->drop)
 		mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
-	TAILQ_REMOVE(&priv->flows, flow, next);
+	TAILQ_REMOVE(list, flow, next);
 	rte_free(flow->ibv_attr);
 	DEBUG("Flow destroyed %p", (void *)flow);
 	rte_free(flow);
@@ -1334,7 +1344,7 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
 
 	(void)error;
 	priv_lock(priv);
-	priv_flow_destroy(priv, flow);
+	priv_flow_destroy(priv, &priv->flows, flow);
 	priv_unlock(priv);
 	return 0;
 }
@@ -1344,15 +1354,17 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
  *
  * @param priv
  *   Pointer to private structure.
+ * @param list
+ *   Pointer to a TAILQ flow list.
  */
-static void
-priv_flow_flush(struct priv *priv)
+void
+priv_flow_flush(struct priv *priv, struct mlx5_flows *list)
 {
-	while (!TAILQ_EMPTY(&priv->flows)) {
+	while (!TAILQ_EMPTY(list)) {
 		struct rte_flow *flow;
 
-		flow = TAILQ_FIRST(&priv->flows);
-		priv_flow_destroy(priv, flow);
+		flow = TAILQ_FIRST(list);
+		priv_flow_destroy(priv, list, flow);
 	}
 }
 
@@ -1370,7 +1382,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
 
 	(void)error;
 	priv_lock(priv);
-	priv_flow_flush(priv);
+	priv_flow_flush(priv, &priv->flows);
 	priv_unlock(priv);
 	return 0;
 }
@@ -1493,13 +1505,15 @@ priv_flow_delete_drop_queue(struct priv *priv)
  *
  * @param priv
  *   Pointer to private structure.
+ * @param list
+ *   Pointer to a TAILQ flow list.
  */
 void
-priv_flow_stop(struct priv *priv)
+priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
 {
 	struct rte_flow *flow;
 
-	TAILQ_FOREACH_REVERSE(flow, &priv->flows, mlx5_flows, next) {
+	TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
 		claim_zero(ibv_destroy_flow(flow->ibv_flow));
 		flow->ibv_flow = NULL;
 		mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
@@ -1522,12 +1536,14 @@ priv_flow_stop(struct priv *priv)
  *
  * @param priv
  *   Pointer to private structure.
+ * @param list
+ *   Pointer to a TAILQ flow list.
  *
 * @return
 *   0 on success, a errno value otherwise and rte_errno is set.
  */
 int
-priv_flow_start(struct priv *priv)
+priv_flow_start(struct priv *priv, struct mlx5_flows *list)
 {
 	int ret;
 	struct rte_flow *flow;
@@ -1535,7 +1551,7 @@ priv_flow_start(struct priv *priv)
 	ret = priv_flow_create_drop_queue(priv);
 	if (ret)
 		return -1;
-	TAILQ_FOREACH(flow, &priv->flows, next) {
+	TAILQ_FOREACH(flow, list, next) {
 		if (flow->frxq.hrxq)
 			goto flow_create;
 		flow->frxq.hrxq =
@@ -1630,3 +1646,90 @@ priv_flow_verify(struct priv *priv)
 	}
 	return ret;
 }
+
+/**
+ * Enable/disable a control flow configured from the control plane.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param spec
+ *   An Ethernet flow spec to apply.
+ * @param mask
+ *   An Ethernet flow mask to apply.
+ * @param enable
+ *   Enable/disable the flow.
+ *
+ * @return
+ *   0 on success.
+ */
+int
+mlx5_ctrl_flow(struct rte_eth_dev *dev,
+	       struct rte_flow_item_eth *spec,
+	       struct rte_flow_item_eth *mask,
+	       unsigned int enable)
+{
+	struct priv *priv = dev->data->dev_private;
+	const struct rte_flow_attr attr = {
+		.ingress = 1,
+		.priority = MLX5_CTRL_FLOW_PRIORITY,
+	};
+	struct rte_flow_item items[] = {
+		{
+			.type = RTE_FLOW_ITEM_TYPE_ETH,
+			.spec = spec,
+			.last = NULL,
+			.mask = mask,
+		},
+		{
+			.type = RTE_FLOW_ITEM_TYPE_END,
+		},
+	};
+	struct rte_flow_action actions[] = {
+		{
+			.type = RTE_FLOW_ACTION_TYPE_QUEUE,
+			.conf = &(struct rte_flow_action_queue){
+				.index = 0,
+			},
+		},
+		{
+			.type = RTE_FLOW_ACTION_TYPE_END,
+		},
+	};
+	struct rte_flow *flow;
+	struct rte_flow_error error;
+
+	if (enable) {
+		flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items,
+					actions, &error);
+		if (!flow)
+			return 1;
+	} else {
+		struct spec {
+			struct ibv_flow_attr ibv_attr;
+			struct ibv_flow_spec_eth eth;
+		} spec;
+		struct mlx5_flow_parse parser = {
+			.ibv_attr = &spec.ibv_attr,
+			.offset = sizeof(struct ibv_flow_attr),
+		};
+		struct ibv_flow_spec_eth *eth;
+		const unsigned int attr_size = sizeof(struct ibv_flow_attr);
+
+		claim_zero(mlx5_flow_create_eth(&items[0], NULL, &parser));
+		TAILQ_FOREACH(flow, &priv->ctrl_flows, next) {
+			eth = (void *)((uintptr_t)flow->ibv_attr + attr_size);
+			assert(eth->type == IBV_FLOW_SPEC_ETH);
+			if (!memcmp(eth, &spec.eth, sizeof(*eth)))
+				break;
+		}
+		if (flow) {
+			claim_zero(ibv_destroy_flow(flow->ibv_flow));
+			mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
+			rte_free(flow->ibv_attr);
+			DEBUG("Control flow destroyed %p", (void *)flow);
+			TAILQ_REMOVE(&priv->ctrl_flows, flow, next);
+			rte_free(flow);
+		}
+	}
+	return 0;
+}
diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c
index e9ea2aade4..f469f41089 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
@@ -53,20 +53,6 @@
 
 /* Initialization data for special flows. */
 static const struct special_flow_init special_flow_init[] = {
-	[HASH_RXQ_FLOW_TYPE_PROMISC] = {
-		.dst_mac_val = "\x00\x00\x00\x00\x00\x00",
-		.dst_mac_mask = "\x00\x00\x00\x00\x00\x00",
-		.hash_types =
-			1 << HASH_RXQ_TCPV4 |
-			1 << HASH_RXQ_UDPV4 |
-			1 << HASH_RXQ_IPV4 |
-			1 << HASH_RXQ_TCPV6 |
-			1 << HASH_RXQ_UDPV6 |
-			1 << HASH_RXQ_IPV6 |
-			1 << HASH_RXQ_ETH |
-			0,
-		.per_vlan = 0,
-	},
 	[HASH_RXQ_FLOW_TYPE_ALLMULTI] = {
 		.dst_mac_val = "\x01\x00\x00\x00\x00\x00",
 		.dst_mac_mask = "\x01\x00\x00\x00\x00\x00",
@@ -346,7 +332,7 @@ priv_special_flow_enable_all(struct priv *priv)
 
 	if (priv->isolated)
 		return 0;
-	for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
+	for (flow_type = HASH_RXQ_FLOW_TYPE_ALLMULTI;
 			flow_type != HASH_RXQ_FLOW_TYPE_MAC;
 			++flow_type) {
 		int ret;
@@ -373,7 +359,7 @@ priv_special_flow_disable_all(struct priv *priv)
 {
 	enum hash_rxq_flow_type flow_type;
 
-	for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC;
+	for (flow_type = HASH_RXQ_FLOW_TYPE_ALLMULTI;
 			flow_type != HASH_RXQ_FLOW_TYPE_MAC;
 			++flow_type)
 		priv_special_flow_disable(priv, flow_type);
@@ -388,19 +374,16 @@ priv_special_flow_disable_all(struct priv *priv)
 void
 mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
-	int ret;
+	struct rte_flow_item_eth eth = {
+		.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+		.type = 0,
+	};
 
 	if (mlx5_is_secondary())
 		return;
-
-	priv_lock(priv);
-	priv->promisc_req = 1;
-	ret = priv_rehash_flows(priv);
-	if (ret)
-		ERROR("error while enabling promiscuous mode: %s",
-		      strerror(ret));
-	priv_unlock(priv);
+	dev->data->promiscuous = 1;
+	claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 1));
 }
 
 /**
@@ -412,19 +395,16 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 void
 mlx5_promiscuous_disable(struct rte_eth_dev *dev)
 {
-	struct priv *priv = dev->data->dev_private;
-	int ret;
+	struct rte_flow_item_eth eth = {
+		.dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+		.src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+		.type = 0,
+	};
 
 	if (mlx5_is_secondary())
 		return;
-
-	priv_lock(priv);
-	priv->promisc_req = 0;
-	ret = priv_rehash_flows(priv);
-	if (ret)
-		ERROR("error while disabling promiscuous mode: %s",
-		      strerror(ret));
-	priv_unlock(priv);
+	dev->data->promiscuous = 0;
+	claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 0));
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 81e9eb5a91..d3d13555b1 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -571,13 +571,7 @@ priv_destroy_hash_rxqs(struct priv *priv)
 int
 priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
 {
-	/* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
-	 * has been requested. */
-	if (priv->promisc_req)
-		return type == HASH_RXQ_FLOW_TYPE_PROMISC;
 	switch (type) {
-	case HASH_RXQ_FLOW_TYPE_PROMISC:
-		return !!priv->promisc_req;
 	case HASH_RXQ_FLOW_TYPE_ALLMULTI:
 		return !!priv->allmulti_req;
 	case HASH_RXQ_FLOW_TYPE_BROADCAST:
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index bb0a65d259..ffba64e81c 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -237,7 +237,6 @@ struct special_flow_init {
 };
 
 enum hash_rxq_flow_type {
-	HASH_RXQ_FLOW_TYPE_PROMISC,
 	HASH_RXQ_FLOW_TYPE_ALLMULTI,
 	HASH_RXQ_FLOW_TYPE_BROADCAST,
 	HASH_RXQ_FLOW_TYPE_IPV6MULTI,
@@ -249,8 +248,6 @@ static inline const char *
 hash_rxq_flow_type_str(enum hash_rxq_flow_type flow_type)
 {
 	switch (flow_type) {
-	case HASH_RXQ_FLOW_TYPE_PROMISC:
-		return "promiscuous";
 	case HASH_RXQ_FLOW_TYPE_ALLMULTI:
 		return "allmulticast";
 	case HASH_RXQ_FLOW_TYPE_BROADCAST:
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index a3114993a1..085abccb4e 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -163,7 +163,16 @@ mlx5_dev_start(struct rte_eth_dev *dev)
 		      (void *)priv, strerror(err));
 		goto error;
 	}
-	err = priv_flow_start(priv);
+	if (dev->data->promiscuous)
+		mlx5_promiscuous_enable(dev);
+	err = priv_flow_start(priv, &priv->ctrl_flows);
+	if (err) {
+		ERROR("%p: an error occurred while configuring control flows:"
+		      " %s",
+		      (void *)priv, strerror(err));
+		goto error;
+	}
+	err = priv_flow_start(priv, &priv->flows);
 	if (err) {
 		ERROR("%p: an error occurred while configuring flows:"
 		      " %s",
@@ -187,7 +196,8 @@ error:
 	priv_special_flow_disable_all(priv);
 	priv_mac_addrs_disable(priv);
 	priv_destroy_hash_rxqs(priv);
-	priv_flow_stop(priv);
+	priv_flow_stop(priv, &priv->flows);
+	priv_flow_flush(priv, &priv->ctrl_flows);
 	priv_rxq_stop(priv);
 	priv_txq_stop(priv);
 	priv_unlock(priv);
@@ -222,13 +232,14 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
 	priv_special_flow_disable_all(priv);
 	priv_mac_addrs_disable(priv);
 	priv_destroy_hash_rxqs(priv);
-	priv_flow_stop(priv);
+	priv_flow_stop(priv, &priv->flows);
+	priv_flow_flush(priv, &priv->ctrl_flows);
 	priv_rx_intr_vec_disable(priv);
+	priv_dev_interrupt_handler_uninstall(priv, dev);
 	priv_txq_stop(priv);
 	priv_rxq_stop(priv);
 	LIST_FOREACH(mr, &priv->mr, next) {
 		priv_mr_release(priv, mr);
 	}
-	priv_dev_interrupt_handler_uninstall(priv, dev);
 	priv_unlock(priv);
 }
-- 
2.20.1