net/mlx5: use flow to enable unicast traffic
author Nélio Laranjeiro <nelio.laranjeiro@6wind.com>
Mon, 9 Oct 2017 14:44:55 +0000 (16:44 +0200)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Thu, 12 Oct 2017 00:36:58 +0000 (01:36 +0100)
RSS hash configuration is currently ignored by the PMD; this commit
removes the RSS feature.

This functionality will be added in a later commit.
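
For illustration, here is a minimal sketch (not part of the patch) of the
kind of control rule the new priv_dev_traffic_enable() installs for one
configured MAC address through the mlx5_ctrl_flow()/mlx5_ctrl_flow_vlan()
helpers added below. The wrapper function name, the MAC address and the
"vlan_id == 0 means no VLAN filter" shortcut are assumptions made for the
example only:

    #include <rte_byteorder.h>
    #include <rte_ethdev.h>
    #include <rte_flow.h>

    #include "mlx5.h" /* driver-internal header declaring mlx5_ctrl_flow*() */

    /* Hypothetical helper: install a unicast control flow for one MAC. */
    static int
    enable_unicast_example(struct rte_eth_dev *dev, uint16_t vlan_id)
    {
            struct rte_flow_item_eth unicast = {
                    .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55", /* example MAC */
            };
            struct rte_flow_item_eth unicast_mask = {
                    .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff", /* full DMAC match */
            };
            struct rte_flow_item_vlan vlan_spec = {
                    .tci = rte_cpu_to_be_16(vlan_id),
            };
            struct rte_flow_item_vlan vlan_mask = {
                    .tci = 0xffff,
            };

            /* With a VLAN filter configured, match destination MAC + VLAN ID. */
            if (vlan_id)
                    return mlx5_ctrl_flow_vlan(dev, &unicast, &unicast_mask,
                                               &vlan_spec, &vlan_mask);
            /* Otherwise a single ETH item is enough. */
            return mlx5_ctrl_flow(dev, &unicast, &unicast_mask);
    }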

Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
Acked-by: Yongseok Koh <yskoh@mellanox.com>
drivers/net/mlx5/mlx5.c
drivers/net/mlx5/mlx5.h
drivers/net/mlx5/mlx5_defs.h
drivers/net/mlx5/mlx5_flow.c
drivers/net/mlx5/mlx5_mac.c
drivers/net/mlx5/mlx5_rxmode.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mlx5/mlx5_rxtx.h
drivers/net/mlx5/mlx5_trigger.c
drivers/net/mlx5/mlx5_vlan.c
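
Seen from an application, the entry points are unchanged: the standard
ethdev calls below still configure reception, but with this patch the mlx5
callbacks they reach (mlx5_mac_addr_add(), mlx5_promiscuous_enable())
rebuild the control flows through mlx5_traffic_restart() instead of
programming MAC entries into each hash Rx queue. A hedged sketch, with
port id 0 and the extra MAC address chosen only for the example:

    #include <rte_ethdev.h>
    #include <rte_ether.h>

    static void
    reconfigure_rx_filters(void)
    {
            struct ether_addr extra = {
                    .addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
            };

            /* Adds a secondary unicast MAC; the VMDq pool argument is
             * ignored by mlx5, see (void)vmdq in mlx5_mac_addr_add(). */
            rte_eth_dev_mac_addr_add(0, &extra, 0);
            /* Promiscuous mode replaces the per-MAC rules with a single
             * catch-all control flow, see priv_dev_traffic_enable(). */
            rte_eth_promiscuous_enable(0);
    }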

index 97d6a21..c818cf8 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -198,10 +198,8 @@ mlx5_dev_close(struct rte_eth_dev *dev)
              ((priv->ctx != NULL) ? priv->ctx->device->name : ""));
        /* In case mlx5_dev_stop() has not been called. */
        priv_dev_interrupt_handler_uninstall(priv, dev);
-       priv_special_flow_disable_all(priv);
-       priv_mac_addrs_disable(priv);
        priv_destroy_hash_rxqs(priv);
-       priv_flow_flush(priv, &priv->flows);
+       priv_dev_traffic_disable(priv, dev);
        /* Prevent crashes when queues are still in use. */
        dev->rx_pkt_burst = removed_rx_burst;
        dev->tx_pkt_burst = removed_tx_burst;
@@ -843,10 +841,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                     mac.addr_bytes[0], mac.addr_bytes[1],
                     mac.addr_bytes[2], mac.addr_bytes[3],
                     mac.addr_bytes[4], mac.addr_bytes[5]);
-               /* Register MAC address. */
-               claim_zero(priv_mac_addr_add(priv, 0,
-                                            (const uint8_t (*)[ETHER_ADDR_LEN])
-                                            mac.addr_bytes));
 #ifndef NDEBUG
                {
                        char ifname[IF_NAMESIZE];
@@ -883,6 +877,8 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                eth_dev->device->driver = &mlx5_driver.driver;
                priv->dev = eth_dev;
                eth_dev->dev_ops = &mlx5_dev_ops;
+               /* Register MAC address. */
+               claim_zero(mlx5_mac_addr_add(eth_dev, &mac, 0, 0));
                TAILQ_INIT(&priv->flows);
                TAILQ_INIT(&priv->ctrl_flows);
 
index 45673b1..e83961f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -96,13 +96,7 @@ struct priv {
        struct ibv_device_attr_ex device_attr; /* Device properties. */
        struct ibv_pd *pd; /* Protection Domain. */
        char ibdev_path[IBV_SYSFS_PATH_MAX]; /* IB device path for secondary */
-       /*
-        * MAC addresses array and configuration bit-field.
-        * An extra entry that cannot be modified by the DPDK is reserved
-        * for broadcast frames (destination MAC address ff:ff:ff:ff:ff:ff).
-        */
-       struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES];
-       BITFIELD_DECLARE(mac_configured, uint32_t, MLX5_MAX_MAC_ADDRESSES);
+       struct ether_addr mac[MLX5_MAX_MAC_ADDRESSES]; /* MAC addresses. */
        uint16_t vlan_filter[MLX5_MAX_VLAN_IDS]; /* VLAN filters table. */
        unsigned int vlan_filter_n; /* Number of configured VLAN filters. */
        /* Device properties. */
@@ -225,13 +219,7 @@ void priv_dev_select_rx_function(struct priv *priv, struct rte_eth_dev *dev);
 /* mlx5_mac.c */
 
 int priv_get_mac(struct priv *, uint8_t (*)[ETHER_ADDR_LEN]);
-void hash_rxq_mac_addrs_del(struct hash_rxq *);
-void priv_mac_addrs_disable(struct priv *);
 void mlx5_mac_addr_remove(struct rte_eth_dev *, uint32_t);
-int hash_rxq_mac_addrs_add(struct hash_rxq *);
-int priv_mac_addr_add(struct priv *, unsigned int,
-                     const uint8_t (*)[ETHER_ADDR_LEN]);
-int priv_mac_addrs_enable(struct priv *);
 int mlx5_mac_addr_add(struct rte_eth_dev *, struct ether_addr *, uint32_t,
                      uint32_t);
 void mlx5_mac_addr_set(struct rte_eth_dev *, struct ether_addr *);
@@ -250,10 +238,6 @@ int mlx5_dev_rss_reta_update(struct rte_eth_dev *,
 
 /* mlx5_rxmode.c */
 
-int priv_special_flow_enable(struct priv *, enum hash_rxq_flow_type);
-void priv_special_flow_disable(struct priv *, enum hash_rxq_flow_type);
-int priv_special_flow_enable_all(struct priv *);
-void priv_special_flow_disable_all(struct priv *);
 void mlx5_promiscuous_enable(struct rte_eth_dev *);
 void mlx5_promiscuous_disable(struct rte_eth_dev *);
 void mlx5_allmulticast_enable(struct rte_eth_dev *);
@@ -280,6 +264,10 @@ void mlx5_vlan_strip_queue_set(struct rte_eth_dev *, uint16_t, int);
 
 int mlx5_dev_start(struct rte_eth_dev *);
 void mlx5_dev_stop(struct rte_eth_dev *);
+int priv_dev_traffic_enable(struct priv *, struct rte_eth_dev *);
+int priv_dev_traffic_disable(struct priv *, struct rte_eth_dev *);
+int priv_dev_traffic_restart(struct priv *, struct rte_eth_dev *);
+int mlx5_traffic_restart(struct rte_eth_dev *);
 
 /* mlx5_flow.c */
 
@@ -302,8 +290,13 @@ int mlx5_flow_isolate(struct rte_eth_dev *, int, struct rte_flow_error *);
 int priv_flow_start(struct priv *, struct mlx5_flows *);
 void priv_flow_stop(struct priv *, struct mlx5_flows *);
 int priv_flow_verify(struct priv *);
+int mlx5_ctrl_flow_vlan(struct rte_eth_dev *, struct rte_flow_item_eth *,
+                       struct rte_flow_item_eth *, struct rte_flow_item_vlan *,
+                       struct rte_flow_item_vlan *);
 int mlx5_ctrl_flow(struct rte_eth_dev *, struct rte_flow_item_eth *,
-                  struct rte_flow_item_eth *, unsigned int);
+                  struct rte_flow_item_eth *);
+int priv_flow_create_drop_queue(struct priv *);
+void priv_flow_delete_drop_queue(struct priv *);
 
 /* mlx5_socket.c */
 
index 59ff00d..3a7706c 100644
--- a/drivers/net/mlx5/mlx5_defs.h
+++ b/drivers/net/mlx5/mlx5_defs.h
@@ -45,9 +45,6 @@
 /* Maximum number of simultaneous VLAN filters. */
 #define MLX5_MAX_VLAN_IDS 128
 
-/* Maximum number of special flows. */
-#define MLX5_MAX_SPECIAL_FLOWS 4
-
 /*
  * Request TX completion every time descriptors reach this threshold since
  * the previous request. Must be a power of two for performance reasons.
index 8512905..83c75f4 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1128,20 +1128,19 @@ priv_flow_create_action_queue(struct priv *priv,
                                                 flow->hash_fields,
                                                 (*rte_flow->queues),
                                                 rte_flow->queues_n);
-       if (rte_flow->frxq.hrxq) {
-               rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-                                  NULL, "duplicated flow");
-               goto error;
-       }
-       rte_flow->frxq.hrxq = mlx5_priv_hrxq_new(priv, rss_hash_default_key,
-                                                rss_hash_default_key_len,
-                                                flow->hash_fields,
-                                                (*rte_flow->queues),
-                                                rte_flow->queues_n);
        if (!rte_flow->frxq.hrxq) {
-               rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
-                                  NULL, "cannot create hash rxq");
-               goto error;
+               rte_flow->frxq.hrxq =
+                       mlx5_priv_hrxq_new(priv, rss_hash_default_key,
+                                          rss_hash_default_key_len,
+                                          flow->hash_fields,
+                                          (*rte_flow->queues),
+                                          rte_flow->queues_n);
+               if (!rte_flow->frxq.hrxq) {
+                       rte_flow_error_set(error, ENOMEM,
+                                          RTE_FLOW_ERROR_TYPE_HANDLE,
+                                          NULL, "cannot create hash rxq");
+                       goto error;
+               }
        }
        for (i = 0; i != flow->actions.queues_n; ++i) {
                struct mlx5_rxq_data *q =
@@ -1396,7 +1395,7 @@ mlx5_flow_flush(struct rte_eth_dev *dev,
  * @return
  *   0 on success.
  */
-static int
+int
 priv_flow_create_drop_queue(struct priv *priv)
 {
        struct mlx5_hrxq_drop *fdq = NULL;
@@ -1479,7 +1478,7 @@ error:
  * @param priv
  *   Pointer to private structure.
  */
-static void
+void
 priv_flow_delete_drop_queue(struct priv *priv)
 {
        struct mlx5_hrxq_drop *fdq = priv->flow_drop_queue;
@@ -1501,8 +1500,6 @@ priv_flow_delete_drop_queue(struct priv *priv)
 /**
  * Remove all flows.
  *
- * Called by dev_stop() to remove all flows.
- *
  * @param priv
  *   Pointer to private structure.
  * @param list
@@ -1528,7 +1525,6 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
                }
                DEBUG("Flow %p removed", (void *)flow);
        }
-       priv_flow_delete_drop_queue(priv);
 }
 
 /**
@@ -1545,12 +1541,8 @@ priv_flow_stop(struct priv *priv, struct mlx5_flows *list)
 int
 priv_flow_start(struct priv *priv, struct mlx5_flows *list)
 {
-       int ret;
        struct rte_flow *flow;
 
-       ret = priv_flow_create_drop_queue(priv);
-       if (ret)
-               return -1;
        TAILQ_FOREACH(flow, list, next) {
                if (flow->frxq.hrxq)
                        goto flow_create;
@@ -1648,25 +1640,28 @@ priv_flow_verify(struct priv *priv)
 }
 
 /**
- * Enable/disable a control flow configured from the control plane.
+ * Enable a control flow configured from the control plane.
  *
  * @param dev
  *   Pointer to Ethernet device.
- * @param spec
+ * @param eth_spec
  *   An Ethernet flow spec to apply.
- * @param mask
+ * @param eth_mask
  *   An Ethernet flow mask to apply.
- * @param enable
- *   Enable/disable the flow.
+ * @param vlan_spec
+ *   A VLAN flow spec to apply.
+ * @param vlan_mask
+ *   A VLAN flow mask to apply.
  *
  * @return
  *   0 on success.
  */
 int
-mlx5_ctrl_flow(struct rte_eth_dev *dev,
-              struct rte_flow_item_eth *spec,
-              struct rte_flow_item_eth *mask,
-              unsigned int enable)
+mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
+                   struct rte_flow_item_eth *eth_spec,
+                   struct rte_flow_item_eth *eth_mask,
+                   struct rte_flow_item_vlan *vlan_spec,
+                   struct rte_flow_item_vlan *vlan_mask)
 {
        struct priv *priv = dev->data->dev_private;
        const struct rte_flow_attr attr = {
@@ -1676,9 +1671,16 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
        struct rte_flow_item items[] = {
                {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
-                       .spec = spec,
+                       .spec = eth_spec,
+                       .last = NULL,
+                       .mask = eth_mask,
+               },
+               {
+                       .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
+                               RTE_FLOW_ITEM_TYPE_END,
+                       .spec = vlan_spec,
                        .last = NULL,
-                       .mask = mask,
+                       .mask = vlan_mask,
                },
                {
                        .type = RTE_FLOW_ITEM_TYPE_END,
@@ -1698,38 +1700,30 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
        struct rte_flow *flow;
        struct rte_flow_error error;
 
-       if (enable) {
-               flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items,
-                                       actions, &error);
-               if (!flow)
-                       return 1;
-       } else {
-               struct spec {
-                       struct ibv_flow_attr ibv_attr;
-                       struct ibv_flow_spec_eth eth;
-               } spec;
-               struct mlx5_flow_parse parser = {
-                       .ibv_attr = &spec.ibv_attr,
-                       .offset = sizeof(struct ibv_flow_attr),
-               };
-               struct ibv_flow_spec_eth *eth;
-               const unsigned int attr_size = sizeof(struct ibv_flow_attr);
-
-               claim_zero(mlx5_flow_create_eth(&items[0], NULL, &parser));
-               TAILQ_FOREACH(flow, &priv->ctrl_flows, next) {
-                       eth = (void *)((uintptr_t)flow->ibv_attr + attr_size);
-                       assert(eth->type == IBV_FLOW_SPEC_ETH);
-                       if (!memcmp(eth, &spec.eth, sizeof(*eth)))
-                               break;
-               }
-               if (flow) {
-                       claim_zero(ibv_destroy_flow(flow->ibv_flow));
-                       mlx5_priv_hrxq_release(priv, flow->frxq.hrxq);
-                       rte_free(flow->ibv_attr);
-                       DEBUG("Control flow destroyed %p", (void *)flow);
-                       TAILQ_REMOVE(&priv->ctrl_flows, flow, next);
-                       rte_free(flow);
-               }
-       }
+       flow = priv_flow_create(priv, &priv->ctrl_flows, &attr, items, actions,
+                               &error);
+       if (!flow)
+               return rte_errno;
        return 0;
 }
+
+/**
+ * Enable a control flow configured from the control plane.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param eth_spec
+ *   An Ethernet flow spec to apply.
+ * @param eth_mask
+ *   An Ethernet flow mask to apply.
+ *
+ * @return
+ *   0 on success.
+ */
+int
+mlx5_ctrl_flow(struct rte_eth_dev *dev,
+              struct rte_flow_item_eth *eth_spec,
+              struct rte_flow_item_eth *eth_mask)
+{
+       return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
+}
index 086af58..d17b991 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -82,112 +82,6 @@ priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN])
        return 0;
 }
 
-/**
- * Delete MAC flow steering rule.
- *
- * @param hash_rxq
- *   Pointer to hash RX queue structure.
- * @param mac_index
- *   MAC address index.
- * @param vlan_index
- *   VLAN index to use.
- */
-static void
-hash_rxq_del_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index,
-                     unsigned int vlan_index)
-{
-#ifndef NDEBUG
-       const uint8_t (*mac)[ETHER_ADDR_LEN] =
-               (const uint8_t (*)[ETHER_ADDR_LEN])
-               hash_rxq->priv->mac[mac_index].addr_bytes;
-#endif
-
-       assert(mac_index < RTE_DIM(hash_rxq->mac_flow));
-       assert(vlan_index < RTE_DIM(hash_rxq->mac_flow[mac_index]));
-       if (hash_rxq->mac_flow[mac_index][vlan_index] == NULL)
-               return;
-       DEBUG("%p: removing MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u"
-             " VLAN index %u",
-             (void *)hash_rxq,
-             (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5],
-             mac_index,
-             vlan_index);
-       claim_zero(ibv_destroy_flow(hash_rxq->mac_flow
-                                   [mac_index][vlan_index]));
-       hash_rxq->mac_flow[mac_index][vlan_index] = NULL;
-}
-
-/**
- * Unregister a MAC address from a hash RX queue.
- *
- * @param hash_rxq
- *   Pointer to hash RX queue structure.
- * @param mac_index
- *   MAC address index.
- */
-static void
-hash_rxq_mac_addr_del(struct hash_rxq *hash_rxq, unsigned int mac_index)
-{
-       unsigned int i;
-
-       assert(mac_index < RTE_DIM(hash_rxq->mac_flow));
-       for (i = 0; (i != RTE_DIM(hash_rxq->mac_flow[mac_index])); ++i)
-               hash_rxq_del_mac_flow(hash_rxq, mac_index, i);
-}
-
-/**
- * Unregister all MAC addresses from a hash RX queue.
- *
- * @param hash_rxq
- *   Pointer to hash RX queue structure.
- */
-void
-hash_rxq_mac_addrs_del(struct hash_rxq *hash_rxq)
-{
-       unsigned int i;
-
-       for (i = 0; (i != RTE_DIM(hash_rxq->mac_flow)); ++i)
-               hash_rxq_mac_addr_del(hash_rxq, i);
-}
-
-/**
- * Unregister a MAC address.
- *
- * This is done for each hash RX queue.
- *
- * @param priv
- *   Pointer to private structure.
- * @param mac_index
- *   MAC address index.
- */
-static void
-priv_mac_addr_del(struct priv *priv, unsigned int mac_index)
-{
-       unsigned int i;
-
-       assert(mac_index < RTE_DIM(priv->mac));
-       if (!BITFIELD_ISSET(priv->mac_configured, mac_index))
-               return;
-       for (i = 0; (i != priv->hash_rxqs_n); ++i)
-               hash_rxq_mac_addr_del(&(*priv->hash_rxqs)[i], mac_index);
-       BITFIELD_RESET(priv->mac_configured, mac_index);
-}
-
-/**
- * Unregister all MAC addresses from all hash RX queues.
- *
- * @param priv
- *   Pointer to private structure.
- */
-void
-priv_mac_addrs_disable(struct priv *priv)
-{
-       unsigned int i;
-
-       for (i = 0; (i != priv->hash_rxqs_n); ++i)
-               hash_rxq_mac_addrs_del(&(*priv->hash_rxqs)[i]);
-}
-
 /**
  * DPDK callback to remove a MAC address.
  *
@@ -199,262 +93,12 @@ priv_mac_addrs_disable(struct priv *priv)
 void
 mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
 {
-       struct priv *priv = dev->data->dev_private;
-
        if (mlx5_is_secondary())
                return;
-
-       priv_lock(priv);
-       DEBUG("%p: removing MAC address from index %" PRIu32,
-             (void *)dev, index);
-       if (index >= RTE_DIM(priv->mac))
-               goto end;
-       priv_mac_addr_del(priv, index);
-end:
-       priv_unlock(priv);
-}
-
-/**
- * Add MAC flow steering rule.
- *
- * @param hash_rxq
- *   Pointer to hash RX queue structure.
- * @param mac_index
- *   MAC address index to register.
- * @param vlan_index
- *   VLAN index to use.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-static int
-hash_rxq_add_mac_flow(struct hash_rxq *hash_rxq, unsigned int mac_index,
-                     unsigned int vlan_index)
-{
-       struct ibv_flow *flow;
-       struct priv *priv = hash_rxq->priv;
-       const uint8_t (*mac)[ETHER_ADDR_LEN] =
-                       (const uint8_t (*)[ETHER_ADDR_LEN])
-                       priv->mac[mac_index].addr_bytes;
-       FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));
-       struct ibv_flow_attr *attr = &data->attr;
-       struct ibv_flow_spec_eth *spec = &data->spec;
-       unsigned int vlan_enabled = !!priv->vlan_filter_n;
-       unsigned int vlan_id = priv->vlan_filter[vlan_index];
-
-       assert(mac_index < RTE_DIM(hash_rxq->mac_flow));
-       assert(vlan_index < RTE_DIM(hash_rxq->mac_flow[mac_index]));
-       if (hash_rxq->mac_flow[mac_index][vlan_index] != NULL)
-               return 0;
-       /*
-        * No padding must be inserted by the compiler between attr and spec.
-        * This layout is expected by libibverbs.
-        */
-       assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
-       priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);
-       /* The first specification must be Ethernet. */
-       assert(spec->type == IBV_FLOW_SPEC_ETH);
-       assert(spec->size == sizeof(*spec));
-       *spec = (struct ibv_flow_spec_eth){
-               .type = IBV_FLOW_SPEC_ETH,
-               .size = sizeof(*spec),
-               .val = {
-                       .dst_mac = {
-                               (*mac)[0], (*mac)[1], (*mac)[2],
-                               (*mac)[3], (*mac)[4], (*mac)[5]
-                       },
-                       .vlan_tag = (vlan_enabled ?
-                                    rte_cpu_to_be_16(vlan_id)
-                                    : 0),
-               },
-               .mask = {
-                       .dst_mac = "\xff\xff\xff\xff\xff\xff",
-                       .vlan_tag = (vlan_enabled ?
-                                    rte_cpu_to_be_16(0xfff) :
-                                    0),
-               },
-       };
-       DEBUG("%p: adding MAC address %02x:%02x:%02x:%02x:%02x:%02x index %u"
-             " VLAN index %u filtering %s, ID %u",
-             (void *)hash_rxq,
-             (*mac)[0], (*mac)[1], (*mac)[2], (*mac)[3], (*mac)[4], (*mac)[5],
-             mac_index,
-             vlan_index,
-             (vlan_enabled ? "enabled" : "disabled"),
-             vlan_id);
-       /* Create related flow. */
-       errno = 0;
-       flow = ibv_create_flow(hash_rxq->qp, attr);
-       if (flow == NULL) {
-               /* It's not clear whether errno is always set in this case. */
-               ERROR("%p: flow configuration failed, errno=%d: %s",
-                     (void *)hash_rxq, errno,
-                     (errno ? strerror(errno) : "Unknown error"));
-               if (errno)
-                       return errno;
-               return EINVAL;
-       }
-       hash_rxq->mac_flow[mac_index][vlan_index] = flow;
-       return 0;
-}
-
-/**
- * Register a MAC address in a hash RX queue.
- *
- * @param hash_rxq
- *   Pointer to hash RX queue structure.
- * @param mac_index
- *   MAC address index to register.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-static int
-hash_rxq_mac_addr_add(struct hash_rxq *hash_rxq, unsigned int mac_index)
-{
-       struct priv *priv = hash_rxq->priv;
-       unsigned int i = 0;
-       int ret;
-
-       assert(mac_index < RTE_DIM(hash_rxq->mac_flow));
-       assert(RTE_DIM(hash_rxq->mac_flow[mac_index]) ==
-              RTE_DIM(priv->vlan_filter));
-       /* Add a MAC address for each VLAN filter, or at least once. */
-       do {
-               ret = hash_rxq_add_mac_flow(hash_rxq, mac_index, i);
-               if (ret) {
-                       /* Failure, rollback. */
-                       while (i != 0)
-                               hash_rxq_del_mac_flow(hash_rxq, mac_index,
-                                                     --i);
-                       return ret;
-               }
-       } while (++i < priv->vlan_filter_n);
-       return 0;
-}
-
-/**
- * Register all MAC addresses in a hash RX queue.
- *
- * @param hash_rxq
- *   Pointer to hash RX queue structure.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-int
-hash_rxq_mac_addrs_add(struct hash_rxq *hash_rxq)
-{
-       struct priv *priv = hash_rxq->priv;
-       unsigned int i;
-       int ret;
-
-       assert(RTE_DIM(priv->mac) == RTE_DIM(hash_rxq->mac_flow));
-       for (i = 0; (i != RTE_DIM(priv->mac)); ++i) {
-               if (!BITFIELD_ISSET(priv->mac_configured, i))
-                       continue;
-               ret = hash_rxq_mac_addr_add(hash_rxq, i);
-               if (!ret)
-                       continue;
-               /* Failure, rollback. */
-               while (i != 0)
-                       hash_rxq_mac_addr_del(hash_rxq, --i);
-               assert(ret > 0);
-               return ret;
-       }
-       return 0;
-}
-
-/**
- * Register a MAC address.
- *
- * This is done for each hash RX queue.
- *
- * @param priv
- *   Pointer to private structure.
- * @param mac_index
- *   MAC address index to use.
- * @param mac
- *   MAC address to register.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-int
-priv_mac_addr_add(struct priv *priv, unsigned int mac_index,
-                 const uint8_t (*mac)[ETHER_ADDR_LEN])
-{
-       unsigned int i;
-       int ret;
-
-       assert(mac_index < RTE_DIM(priv->mac));
-       /* First, make sure this address isn't already configured. */
-       for (i = 0; (i != RTE_DIM(priv->mac)); ++i) {
-               /* Skip this index, it's going to be reconfigured. */
-               if (i == mac_index)
-                       continue;
-               if (!BITFIELD_ISSET(priv->mac_configured, i))
-                       continue;
-               if (memcmp(priv->mac[i].addr_bytes, *mac, sizeof(*mac)))
-                       continue;
-               /* Address already configured elsewhere, return with error. */
-               return EADDRINUSE;
-       }
-       if (BITFIELD_ISSET(priv->mac_configured, mac_index))
-               priv_mac_addr_del(priv, mac_index);
-       priv->mac[mac_index] = (struct ether_addr){
-               {
-                       (*mac)[0], (*mac)[1], (*mac)[2],
-                       (*mac)[3], (*mac)[4], (*mac)[5]
-               }
-       };
-       if (!priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
-               goto end;
-       for (i = 0; (i != priv->hash_rxqs_n); ++i) {
-               ret = hash_rxq_mac_addr_add(&(*priv->hash_rxqs)[i], mac_index);
-               if (!ret)
-                       continue;
-               /* Failure, rollback. */
-               while (i != 0)
-                       hash_rxq_mac_addr_del(&(*priv->hash_rxqs)[--i],
-                                             mac_index);
-               return ret;
-       }
-end:
-       BITFIELD_SET(priv->mac_configured, mac_index);
-       return 0;
-}
-
-/**
- * Register all MAC addresses in all hash RX queues.
- *
- * @param priv
- *   Pointer to private structure.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-int
-priv_mac_addrs_enable(struct priv *priv)
-{
-       unsigned int i;
-       int ret;
-
-       if (priv->isolated)
-               return 0;
-       if (!priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
-               return 0;
-       for (i = 0; (i != priv->hash_rxqs_n); ++i) {
-               ret = hash_rxq_mac_addrs_add(&(*priv->hash_rxqs)[i]);
-               if (!ret)
-                       continue;
-               /* Failure, rollback. */
-               while (i != 0)
-                       hash_rxq_mac_addrs_del(&(*priv->hash_rxqs)[--i]);
-               assert(ret > 0);
-               return ret;
-       }
-       return 0;
+       assert(index < MLX5_MAX_MAC_ADDRESSES);
+       memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr));
+       if (!dev->data->promiscuous && !dev->data->all_multicast)
+               mlx5_traffic_restart(dev);
 }
 
 /**
@@ -468,31 +112,35 @@ priv_mac_addrs_enable(struct priv *priv)
  *   MAC address index.
  * @param vmdq
  *   VMDq pool index to associate address with (ignored).
+ *
+ * @return
+ *   0 on success.
  */
 int
-mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
                  uint32_t index, uint32_t vmdq)
 {
-       struct priv *priv = dev->data->dev_private;
-       int re;
-
-       if (mlx5_is_secondary())
-               return -ENOTSUP;
+       unsigned int i;
+       int ret = 0;
 
        (void)vmdq;
-       priv_lock(priv);
-       DEBUG("%p: adding MAC address at index %" PRIu32,
-             (void *)dev, index);
-       if (index >= RTE_DIM(priv->mac)) {
-               re = EINVAL;
-               goto end;
+       if (mlx5_is_secondary())
+               return 0;
+       assert(index < MLX5_MAX_MAC_ADDRESSES);
+       /* First, make sure this address isn't already configured. */
+       for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) {
+               /* Skip this index, it's going to be reconfigured. */
+               if (i == index)
+                       continue;
+               if (memcmp(&dev->data->mac_addrs[i], mac, sizeof(*mac)))
+                       continue;
+               /* Address already configured elsewhere, return with error. */
+               return EADDRINUSE;
        }
-       re = priv_mac_addr_add(priv, index,
-                              (const uint8_t (*)[ETHER_ADDR_LEN])
-                              mac_addr->addr_bytes);
-end:
-       priv_unlock(priv);
-       return -re;
+       dev->data->mac_addrs[index] = *mac;
+       if (!dev->data->promiscuous && !dev->data->all_multicast)
+               mlx5_traffic_restart(dev);
+       return ret;
 }
 
 /**
@@ -506,7 +154,8 @@ end:
 void
 mlx5_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
 {
+       if (mlx5_is_secondary())
+               return;
        DEBUG("%p: setting primary MAC address", (void *)dev);
-       mlx5_mac_addr_remove(dev, 0);
        mlx5_mac_addr_add(dev, mac_addr, 0, 0);
 }
index 0c75889..0ef2cdf 100644
--- a/drivers/net/mlx5/mlx5_rxmode.c
+++ b/drivers/net/mlx5/mlx5_rxmode.c
 #include "mlx5_rxtx.h"
 #include "mlx5_utils.h"
 
-/* Initialization data for special flows. */
-static const struct special_flow_init special_flow_init[] = {
-       [HASH_RXQ_FLOW_TYPE_BROADCAST] = {
-               .dst_mac_val = "\xff\xff\xff\xff\xff\xff",
-               .dst_mac_mask = "\xff\xff\xff\xff\xff\xff",
-               .hash_types =
-                       1 << HASH_RXQ_UDPV4 |
-                       1 << HASH_RXQ_IPV4 |
-                       1 << HASH_RXQ_UDPV6 |
-                       1 << HASH_RXQ_IPV6 |
-                       1 << HASH_RXQ_ETH |
-                       0,
-               .per_vlan = 1,
-       },
-       [HASH_RXQ_FLOW_TYPE_IPV6MULTI] = {
-               .dst_mac_val = "\x33\x33\x00\x00\x00\x00",
-               .dst_mac_mask = "\xff\xff\x00\x00\x00\x00",
-               .hash_types =
-                       1 << HASH_RXQ_UDPV6 |
-                       1 << HASH_RXQ_IPV6 |
-                       1 << HASH_RXQ_ETH |
-                       0,
-               .per_vlan = 1,
-       },
-};
-
-/**
- * Enable a special flow in a hash RX queue for a given VLAN index.
- *
- * @param hash_rxq
- *   Pointer to hash RX queue structure.
- * @param flow_type
- *   Special flow type.
- * @param vlan_index
- *   VLAN index to use.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-static int
-hash_rxq_special_flow_enable_vlan(struct hash_rxq *hash_rxq,
-                                 enum hash_rxq_flow_type flow_type,
-                                 unsigned int vlan_index)
-{
-       struct priv *priv = hash_rxq->priv;
-       struct ibv_flow *flow;
-       FLOW_ATTR_SPEC_ETH(data, priv_flow_attr(priv, NULL, 0, hash_rxq->type));
-       struct ibv_flow_attr *attr = &data->attr;
-       struct ibv_flow_spec_eth *spec = &data->spec;
-       const uint8_t *mac;
-       const uint8_t *mask;
-       unsigned int vlan_enabled = (priv->vlan_filter_n &&
-                                    special_flow_init[flow_type].per_vlan);
-       unsigned int vlan_id = priv->vlan_filter[vlan_index];
-
-       /* Check if flow is relevant for this hash_rxq. */
-       if (!(special_flow_init[flow_type].hash_types & (1 << hash_rxq->type)))
-               return 0;
-       /* Check if flow already exists. */
-       if (hash_rxq->special_flow[flow_type][vlan_index] != NULL)
-               return 0;
-
-       /*
-        * No padding must be inserted by the compiler between attr and spec.
-        * This layout is expected by libibverbs.
-        */
-       assert(((uint8_t *)attr + sizeof(*attr)) == (uint8_t *)spec);
-       priv_flow_attr(priv, attr, sizeof(data), hash_rxq->type);
-       /* The first specification must be Ethernet. */
-       assert(spec->type == IBV_FLOW_SPEC_ETH);
-       assert(spec->size == sizeof(*spec));
-
-       mac = special_flow_init[flow_type].dst_mac_val;
-       mask = special_flow_init[flow_type].dst_mac_mask;
-       *spec = (struct ibv_flow_spec_eth){
-               .type = IBV_FLOW_SPEC_ETH,
-               .size = sizeof(*spec),
-               .val = {
-                       .dst_mac = {
-                               mac[0], mac[1], mac[2],
-                               mac[3], mac[4], mac[5],
-                       },
-                       .vlan_tag = (vlan_enabled ?
-                                    rte_cpu_to_be_16(vlan_id) :
-                                    0),
-               },
-               .mask = {
-                       .dst_mac = {
-                               mask[0], mask[1], mask[2],
-                               mask[3], mask[4], mask[5],
-                       },
-                       .vlan_tag = (vlan_enabled ?
-                                    rte_cpu_to_be_16(0xfff) :
-                                    0),
-               },
-       };
-
-       errno = 0;
-       flow = ibv_create_flow(hash_rxq->qp, attr);
-       if (flow == NULL) {
-               /* It's not clear whether errno is always set in this case. */
-               ERROR("%p: flow configuration failed, errno=%d: %s",
-                     (void *)hash_rxq, errno,
-                     (errno ? strerror(errno) : "Unknown error"));
-               if (errno)
-                       return errno;
-               return EINVAL;
-       }
-       hash_rxq->special_flow[flow_type][vlan_index] = flow;
-       DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) enabled",
-             (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
-             vlan_id, vlan_index);
-       return 0;
-}
-
-/**
- * Disable a special flow in a hash RX queue for a given VLAN index.
- *
- * @param hash_rxq
- *   Pointer to hash RX queue structure.
- * @param flow_type
- *   Special flow type.
- * @param vlan_index
- *   VLAN index to use.
- */
-static void
-hash_rxq_special_flow_disable_vlan(struct hash_rxq *hash_rxq,
-                                  enum hash_rxq_flow_type flow_type,
-                                  unsigned int vlan_index)
-{
-       struct ibv_flow *flow =
-               hash_rxq->special_flow[flow_type][vlan_index];
-
-       if (flow == NULL)
-               return;
-       claim_zero(ibv_destroy_flow(flow));
-       hash_rxq->special_flow[flow_type][vlan_index] = NULL;
-       DEBUG("%p: special flow %s (index %d) VLAN %u (index %u) disabled",
-             (void *)hash_rxq, hash_rxq_flow_type_str(flow_type), flow_type,
-             hash_rxq->priv->vlan_filter[vlan_index], vlan_index);
-}
-
-/**
- * Enable a special flow in a hash RX queue.
- *
- * @param hash_rxq
- *   Pointer to hash RX queue structure.
- * @param flow_type
- *   Special flow type.
- * @param vlan_index
- *   VLAN index to use.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-static int
-hash_rxq_special_flow_enable(struct hash_rxq *hash_rxq,
-                            enum hash_rxq_flow_type flow_type)
-{
-       struct priv *priv = hash_rxq->priv;
-       unsigned int i = 0;
-       int ret;
-
-       assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
-       assert(RTE_DIM(hash_rxq->special_flow[flow_type]) ==
-              RTE_DIM(priv->vlan_filter));
-       /* Add a special flow for each VLAN filter when relevant. */
-       do {
-               ret = hash_rxq_special_flow_enable_vlan(hash_rxq, flow_type, i);
-               if (ret) {
-                       /* Failure, rollback. */
-                       while (i != 0)
-                               hash_rxq_special_flow_disable_vlan(hash_rxq,
-                                                                  flow_type,
-                                                                  --i);
-                       return ret;
-               }
-       } while (special_flow_init[flow_type].per_vlan &&
-                ++i < priv->vlan_filter_n);
-       return 0;
-}
-
-/**
- * Disable a special flow in a hash RX queue.
- *
- * @param hash_rxq
- *   Pointer to hash RX queue structure.
- * @param flow_type
- *   Special flow type.
- */
-static void
-hash_rxq_special_flow_disable(struct hash_rxq *hash_rxq,
-                             enum hash_rxq_flow_type flow_type)
-{
-       unsigned int i;
-
-       assert((unsigned int)flow_type < RTE_DIM(hash_rxq->special_flow));
-       for (i = 0; (i != RTE_DIM(hash_rxq->special_flow[flow_type])); ++i)
-               hash_rxq_special_flow_disable_vlan(hash_rxq, flow_type, i);
-}
-
-/**
- * Enable a special flow in all hash RX queues.
- *
- * @param priv
- *   Private structure.
- * @param flow_type
- *   Special flow type.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-int
-priv_special_flow_enable(struct priv *priv, enum hash_rxq_flow_type flow_type)
-{
-       unsigned int i;
-
-       if (!priv_allow_flow_type(priv, flow_type))
-               return 0;
-       for (i = 0; (i != priv->hash_rxqs_n); ++i) {
-               struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
-               int ret;
-
-               ret = hash_rxq_special_flow_enable(hash_rxq, flow_type);
-               if (!ret)
-                       continue;
-               /* Failure, rollback. */
-               while (i != 0) {
-                       hash_rxq = &(*priv->hash_rxqs)[--i];
-                       hash_rxq_special_flow_disable(hash_rxq, flow_type);
-               }
-               return ret;
-       }
-       return 0;
-}
-
-/**
- * Disable a special flow in all hash RX queues.
- *
- * @param priv
- *   Private structure.
- * @param flow_type
- *   Special flow type.
- */
-void
-priv_special_flow_disable(struct priv *priv, enum hash_rxq_flow_type flow_type)
-{
-       unsigned int i;
-
-       for (i = 0; (i != priv->hash_rxqs_n); ++i) {
-               struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
-
-               hash_rxq_special_flow_disable(hash_rxq, flow_type);
-       }
-}
-
-/**
- * Enable all special flows in all hash RX queues.
- *
- * @param priv
- *   Private structure.
- */
-int
-priv_special_flow_enable_all(struct priv *priv)
-{
-       enum hash_rxq_flow_type flow_type;
-
-       if (priv->isolated)
-               return 0;
-       for (flow_type = HASH_RXQ_FLOW_TYPE_BROADCAST;
-                       flow_type != HASH_RXQ_FLOW_TYPE_MAC;
-                       ++flow_type) {
-               int ret;
-
-               ret = priv_special_flow_enable(priv, flow_type);
-               if (!ret)
-                       continue;
-               /* Failure, rollback. */
-               while (flow_type)
-                       priv_special_flow_disable(priv, --flow_type);
-               return ret;
-       }
-       return 0;
-}
-
-/**
- * Disable all special flows in all hash RX queues.
- *
- * @param priv
- *   Private structure.
- */
-void
-priv_special_flow_disable_all(struct priv *priv)
-{
-       enum hash_rxq_flow_type flow_type;
-
-       for (flow_type = HASH_RXQ_FLOW_TYPE_BROADCAST;
-                       flow_type != HASH_RXQ_FLOW_TYPE_MAC;
-                       ++flow_type)
-               priv_special_flow_disable(priv, flow_type);
-}
-
 /**
  * DPDK callback to enable promiscuous mode.
  *
@@ -362,16 +60,10 @@ priv_special_flow_disable_all(struct priv *priv)
 void
 mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 {
-       struct rte_flow_item_eth eth = {
-               .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
-               .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
-               .type = 0,
-       };
-
        if (mlx5_is_secondary())
                return;
        dev->data->promiscuous = 1;
-       claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 1));
+       mlx5_traffic_restart(dev);
 }
 
 /**
@@ -383,16 +75,10 @@ mlx5_promiscuous_enable(struct rte_eth_dev *dev)
 void
 mlx5_promiscuous_disable(struct rte_eth_dev *dev)
 {
-       struct rte_flow_item_eth eth = {
-               .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
-               .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
-               .type = 0,
-       };
-
        if (mlx5_is_secondary())
                return;
        dev->data->promiscuous = 0;
-       claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 0));
+       mlx5_traffic_restart(dev);
 }
 
 /**
@@ -404,17 +90,10 @@ mlx5_promiscuous_disable(struct rte_eth_dev *dev)
 void
 mlx5_allmulticast_enable(struct rte_eth_dev *dev)
 {
-       struct rte_flow_item_eth eth = {
-               .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
-               .src.addr_bytes = "\x01\x00\x00\x00\x00\x00",
-               .type = 0,
-       };
-
        if (mlx5_is_secondary())
                return;
        dev->data->all_multicast = 1;
-       if (dev->data->dev_started)
-               claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 1));
+       mlx5_traffic_restart(dev);
 }
 
 /**
@@ -426,15 +105,8 @@ mlx5_allmulticast_enable(struct rte_eth_dev *dev)
 void
 mlx5_allmulticast_disable(struct rte_eth_dev *dev)
 {
-       struct rte_flow_item_eth eth = {
-               .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
-               .src.addr_bytes = "\x01\x00\x00\x00\x00\x00",
-               .type = 0,
-       };
-
        if (mlx5_is_secondary())
                return;
        dev->data->all_multicast = 0;
-       if (dev->data->dev_started)
-               claim_zero(mlx5_ctrl_flow(dev, &eth, &eth, 0));
+       mlx5_traffic_restart(dev);
 }
index d3cd58e..c603d2b 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -531,12 +531,6 @@ priv_destroy_hash_rxqs(struct priv *priv)
 
                assert(hash_rxq->priv == priv);
                assert(hash_rxq->qp != NULL);
-               /* Also check that there are no remaining flows. */
-               for (j = 0; (j != RTE_DIM(hash_rxq->special_flow)); ++j)
-                       for (k = 0;
-                            (k != RTE_DIM(hash_rxq->special_flow[j]));
-                            ++k)
-                               assert(hash_rxq->special_flow[j][k] == NULL);
                for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j)
                        for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k)
                                assert(hash_rxq->mac_flow[j][k] == NULL);
@@ -557,63 +551,6 @@ priv_destroy_hash_rxqs(struct priv *priv)
        priv->ind_tables = NULL;
 }
 
-/**
- * Check whether a given flow type is allowed.
- *
- * @param priv
- *   Pointer to private structure.
- * @param type
- *   Flow type to check.
- *
- * @return
- *   Nonzero if the given flow type is allowed.
- */
-int
-priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
-{
-       (void)priv;
-       switch (type) {
-       case HASH_RXQ_FLOW_TYPE_BROADCAST:
-       case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
-       case HASH_RXQ_FLOW_TYPE_MAC:
-               return 1;
-       default:
-               /* Unsupported flow type is not allowed. */
-               return 0;
-       }
-       return 0;
-}
-
-/**
- * Automatically enable/disable flows according to configuration.
- *
- * @param priv
- *   Private structure.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-int
-priv_rehash_flows(struct priv *priv)
-{
-       size_t i;
-
-       for (i = 0; i != RTE_DIM((*priv->hash_rxqs)[0].special_flow); ++i)
-               if (!priv_allow_flow_type(priv, i)) {
-                       priv_special_flow_disable(priv, i);
-               } else {
-                       int ret = priv_special_flow_enable(priv, i);
-
-                       if (ret)
-                               return ret;
-               }
-       if (priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
-               return priv_mac_addrs_enable(priv);
-       priv_mac_addrs_disable(priv);
-       return 0;
-}
-
 /**
  * Allocate RX queue elements.
  *
index 6f474d2..c60bc4d 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -236,28 +236,6 @@ struct special_flow_init {
        unsigned int per_vlan:1;
 };
 
-enum hash_rxq_flow_type {
-       HASH_RXQ_FLOW_TYPE_BROADCAST,
-       HASH_RXQ_FLOW_TYPE_IPV6MULTI,
-       HASH_RXQ_FLOW_TYPE_MAC,
-};
-
-#ifndef NDEBUG
-static inline const char *
-hash_rxq_flow_type_str(enum hash_rxq_flow_type flow_type)
-{
-       switch (flow_type) {
-       case HASH_RXQ_FLOW_TYPE_BROADCAST:
-               return "broadcast";
-       case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
-               return "IPv6 multicast";
-       case HASH_RXQ_FLOW_TYPE_MAC:
-               return "MAC";
-       }
-       return NULL;
-}
-#endif /* NDEBUG */
-
 struct hash_rxq {
        struct priv *priv; /* Back pointer to private data. */
        struct ibv_qp *qp; /* Hash RX QP. */
@@ -265,8 +243,6 @@ struct hash_rxq {
        /* MAC flow steering rules, one per VLAN ID. */
        struct ibv_flow *mac_flow
                [MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS];
-       struct ibv_flow *special_flow
-               [MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS];
 };
 
 /* TX queue descriptor. */
@@ -336,8 +312,6 @@ size_t priv_flow_attr(struct priv *, struct ibv_flow_attr *,
                      size_t, enum hash_rxq_type);
 int priv_create_hash_rxqs(struct priv *);
 void priv_destroy_hash_rxqs(struct priv *);
-int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type);
-int priv_rehash_flows(struct priv *);
 void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *);
 int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int,
                        const struct rte_eth_rxconf *, struct rte_mempool *);
index 27e7890..4143571 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -135,7 +135,14 @@ mlx5_dev_start(struct rte_eth_dev *dev)
        if (mlx5_is_secondary())
                return -E_RTE_SECONDARY;
 
+       dev->data->dev_started = 1;
        priv_lock(priv);
+       err = priv_flow_create_drop_queue(priv);
+       if (err) {
+               ERROR("%p: Drop queue allocation failed: %s",
+                     (void *)dev, strerror(err));
+               goto error;
+       }
        DEBUG("%p: allocating and configuring hash RX queues", (void *)dev);
        rte_mempool_walk(mlx5_mp2mr_iter, priv);
        err = priv_txq_start(priv);
@@ -155,21 +162,8 @@ mlx5_dev_start(struct rte_eth_dev *dev)
        /* Update receive callback. */
        priv_dev_select_rx_function(priv, dev);
        err = priv_create_hash_rxqs(priv);
-       if (!err)
-               err = priv_rehash_flows(priv);
-       else {
-               ERROR("%p: an error occurred while configuring hash RX queues:"
-                     " %s",
-                     (void *)priv, strerror(err));
-               goto error;
-       }
-       if (dev->data->promiscuous)
-               mlx5_promiscuous_enable(dev);
-       else if (dev->data->all_multicast)
-               mlx5_allmulticast_enable(dev);
-       err = priv_flow_start(priv, &priv->ctrl_flows);
        if (err) {
-               ERROR("%p: an error occurred while configuring control flows:"
+               ERROR("%p: an error occurred while configuring hash RX queues:"
                      " %s",
                      (void *)priv, strerror(err));
                goto error;
@@ -193,15 +187,14 @@ mlx5_dev_start(struct rte_eth_dev *dev)
        return 0;
 error:
        /* Rollback. */
+       dev->data->dev_started = 0;
        LIST_FOREACH(mr, &priv->mr, next)
                priv_mr_release(priv, mr);
-       priv_special_flow_disable_all(priv);
-       priv_mac_addrs_disable(priv);
        priv_destroy_hash_rxqs(priv);
        priv_flow_stop(priv, &priv->flows);
-       priv_flow_flush(priv, &priv->ctrl_flows);
-       priv_rxq_stop(priv);
        priv_txq_stop(priv);
+       priv_rxq_stop(priv);
+       priv_flow_delete_drop_queue(priv);
        priv_unlock(priv);
        return -err;
 }
@@ -231,8 +224,6 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
        rte_wmb();
        usleep(1000 * priv->rxqs_n);
        DEBUG("%p: cleaning up and destroying hash RX queues", (void *)dev);
-       priv_special_flow_disable_all(priv);
-       priv_mac_addrs_disable(priv);
        priv_destroy_hash_rxqs(priv);
        priv_flow_stop(priv, &priv->flows);
        priv_flow_flush(priv, &priv->ctrl_flows);
@@ -243,5 +234,172 @@ mlx5_dev_stop(struct rte_eth_dev *dev)
        LIST_FOREACH(mr, &priv->mr, next) {
                priv_mr_release(priv, mr);
        }
+       priv_flow_delete_drop_queue(priv);
+       priv_unlock(priv);
+}
+
+/**
+ * Enable traffic flows configured by the control plane.
+ *
+ * @param priv
+ *   Pointer to Ethernet device private data.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success.
+ */
+int
+priv_dev_traffic_enable(struct priv *priv, struct rte_eth_dev *dev)
+{
+       if (priv->isolated)
+               return 0;
+       if (dev->data->promiscuous) {
+               struct rte_flow_item_eth promisc = {
+                       .dst.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+                       .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+                       .type = 0,
+               };
+
+               claim_zero(mlx5_ctrl_flow(dev, &promisc, &promisc));
+       } else if (dev->data->all_multicast) {
+               struct rte_flow_item_eth multicast = {
+                       .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+                       .src.addr_bytes = "\x01\x00\x00\x00\x00\x00",
+                       .type = 0,
+               };
+
+               claim_zero(mlx5_ctrl_flow(dev, &multicast, &multicast));
+       } else {
+               struct rte_flow_item_eth bcast = {
+                       .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+               };
+               struct rte_flow_item_eth ipv6_multi_spec = {
+                       .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
+               };
+               struct rte_flow_item_eth ipv6_multi_mask = {
+                       .dst.addr_bytes = "\xff\xff\x00\x00\x00\x00",
+               };
+               struct rte_flow_item_eth unicast = {
+                       .src.addr_bytes = "\x00\x00\x00\x00\x00\x00",
+               };
+               struct rte_flow_item_eth unicast_mask = {
+                       .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+               };
+               const unsigned int vlan_filter_n = priv->vlan_filter_n;
+               const struct ether_addr cmp = {
+                       .addr_bytes = "\x00\x00\x00\x00\x00\x00",
+               };
+               unsigned int i;
+               unsigned int j;
+               unsigned int unicast_flow = 0;
+               int ret;
+
+               for (i = 0; i != MLX5_MAX_MAC_ADDRESSES; ++i) {
+                       struct ether_addr *mac = &dev->data->mac_addrs[i];
+
+                       if (!memcmp(mac, &cmp, sizeof(*mac)))
+                               continue;
+                       memcpy(&unicast.dst.addr_bytes,
+                              mac->addr_bytes,
+                              ETHER_ADDR_LEN);
+                       for (j = 0; j != vlan_filter_n; ++j) {
+                               uint16_t vlan = priv->vlan_filter[j];
+
+                               struct rte_flow_item_vlan vlan_spec = {
+                                       .tci = rte_cpu_to_be_16(vlan),
+                               };
+                               struct rte_flow_item_vlan vlan_mask = {
+                                       .tci = 0xffff,
+                               };
+
+                               ret = mlx5_ctrl_flow_vlan(dev, &unicast,
+                                                         &unicast_mask,
+                                                         &vlan_spec,
+                                                         &vlan_mask);
+                               if (ret)
+                                       goto error;
+                               unicast_flow = 1;
+                       }
+                       if (!vlan_filter_n) {
+                               ret = mlx5_ctrl_flow(dev, &unicast,
+                                                    &unicast_mask);
+                               if (ret)
+                                       goto error;
+                               unicast_flow = 1;
+                       }
+               }
+               if (!unicast_flow)
+                       return 0;
+               ret = mlx5_ctrl_flow(dev, &bcast, &bcast);
+               if (ret)
+                       goto error;
+               ret = mlx5_ctrl_flow(dev, &ipv6_multi_spec, &ipv6_multi_mask);
+               if (ret)
+                       goto error;
+       }
+       return 0;
+error:
+       return rte_errno;
+}
+
+
+/**
+ * Disable traffic flows configured by the control plane.
+ *
+ * @param priv
+ *   Pointer to Ethernet device private data.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success.
+ */
+int
+priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev)
+{
+       (void)dev;
+       priv_flow_flush(priv, &priv->ctrl_flows);
+       return 0;
+}
+
+/**
+ * Restart traffic flows configured by the control plane.
+ *
+ * @param priv
+ *   Pointer to Ethernet device private data.
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success.
+ */
+int
+priv_dev_traffic_restart(struct priv *priv, struct rte_eth_dev *dev)
+{
+       if (dev->data->dev_started) {
+               priv_dev_traffic_disable(priv, dev);
+               priv_dev_traffic_enable(priv, dev);
+       }
+       return 0;
+}
+
+/**
+ * Restart traffic flows configured by the control plane.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ *
+ * @return
+ *   0 on success.
+ */
+int
+mlx5_traffic_restart(struct rte_eth_dev *dev)
+{
+       struct priv *priv = dev->data->dev_private;
+
+       priv_lock(priv);
+       priv_dev_traffic_restart(priv, dev);
        priv_unlock(priv);
+       return 0;
 }
index d707984..ed91d9b 100644
--- a/drivers/net/mlx5/mlx5_vlan.c
+++ b/drivers/net/mlx5/mlx5_vlan.c
@@ -44,7 +44,7 @@
 #include "mlx5_autoconf.h"
 
 /**
- * Configure a VLAN filter.
+ * DPDK callback to configure a VLAN filter.
  *
  * @param dev
  *   Pointer to Ethernet device structure.
  *   Toggle filter.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   0 on success, negative errno value on failure.
  */
-static int
-vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+int
+mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
        struct priv *priv = dev->data->dev_private;
        unsigned int i;
 
+       priv_lock(priv);
        DEBUG("%p: %s VLAN filter ID %" PRIu16,
              (void *)dev, (on ? "enable" : "disable"), vlan_id);
        assert(priv->vlan_filter_n <= RTE_DIM(priv->vlan_filter));
        for (i = 0; (i != priv->vlan_filter_n); ++i)
                if (priv->vlan_filter[i] == vlan_id)
                        break;
-       /* Check if there's room for another VLAN filter. */
-       if (i == RTE_DIM(priv->vlan_filter))
-               return ENOMEM;
        if (i < priv->vlan_filter_n) {
                assert(priv->vlan_filter_n != 0);
                /* Enabling an existing VLAN filter has no effect. */
                if (on)
-                       return 0;
+                       goto out;
                /* Remove VLAN filter from list. */
                --priv->vlan_filter_n;
                memmove(&priv->vlan_filter[i],
@@ -87,41 +85,16 @@ vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
                assert(i == priv->vlan_filter_n);
                /* Disabling an unknown VLAN filter has no effect. */
                if (!on)
-                       return 0;
+                       goto out;
                /* Add new VLAN filter. */
                priv->vlan_filter[priv->vlan_filter_n] = vlan_id;
                ++priv->vlan_filter_n;
        }
-       /* Rehash flows in all hash RX queues. */
-       priv_mac_addrs_disable(priv);
-       priv_special_flow_disable_all(priv);
-       return priv_rehash_flows(priv);
-}
-
-/**
- * DPDK callback to configure a VLAN filter.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param vlan_id
- *   VLAN ID to filter.
- * @param on
- *   Toggle filter.
- *
- * @return
- *   0 on success, negative errno value on failure.
- */
-int
-mlx5_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
-{
-       struct priv *priv = dev->data->dev_private;
-       int ret;
-
-       priv_lock(priv);
-       ret = vlan_filter_set(dev, vlan_id, on);
+       if (dev->data->dev_started)
+               priv_dev_traffic_restart(priv, dev);
+out:
        priv_unlock(priv);
-       assert(ret >= 0);
-       return -ret;
+       return 0;
 }
 
 /**