Queue start/stop = Y
MTU update = Y
Jumbo frame = Y
+Unicast MAC filter = Y
SR-IOV = Y
Basic stats = Y
Stats per queue = Y
.dev_set_link_up = mlx4_dev_set_link_up,
.dev_close = mlx4_dev_close,
.link_update = mlx4_link_update,
+ .mac_addr_remove = mlx4_mac_addr_remove,
+ .mac_addr_add = mlx4_mac_addr_add,
+ .mac_addr_set = mlx4_mac_addr_set,
.stats_get = mlx4_stats_get,
.stats_reset = mlx4_stats_reset,
.dev_infos_get = mlx4_dev_infos_get,
mac.addr_bytes[2], mac.addr_bytes[3],
mac.addr_bytes[4], mac.addr_bytes[5]);
/* Register MAC address. */
- priv->mac = mac;
+ priv->mac[0] = mac;
#ifndef NDEBUG
{
char ifname[IF_NAMESIZE];
goto port_error;
}
eth_dev->data->dev_private = priv;
- eth_dev->data->mac_addrs = &priv->mac;
+ eth_dev->data->mac_addrs = priv->mac;
eth_dev->device = &pci_dev->device;
rte_eth_copy_pci_info(eth_dev, pci_dev);
eth_dev->device->driver = &mlx4_driver.driver;
#include <rte_interrupts.h>
#include <rte_mempool.h>
+/** Maximum number of simultaneous MAC addresses. This value is arbitrary. */
+#define MLX4_MAX_MAC_ADDRESSES 128
+
/** Request send completion once in every 64 sends, might be less. */
#define MLX4_PMD_TX_PER_COMP_REQ 64
struct ibv_context *ctx; /**< Verbs context. */
struct ibv_device_attr device_attr; /**< Device properties. */
struct ibv_pd *pd; /**< Protection Domain. */
- struct ether_addr mac; /**< MAC address. */
/* Device properties. */
uint16_t mtu; /**< Configured MTU. */
uint8_t port; /**< Physical port number. */
struct rte_intr_handle intr_handle; /**< Port interrupt handle. */
struct mlx4_drop *drop; /**< Shared resources for drop flow rules. */
LIST_HEAD(, rte_flow) flows; /**< Configured flow rule handles. */
+ struct ether_addr mac[MLX4_MAX_MAC_ADDRESSES];
+ /**< Configured MAC addresses. Unused entries are zeroed. */
};
/* mlx4_ethdev.c */
int mlx4_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
int mlx4_dev_set_link_down(struct rte_eth_dev *dev);
int mlx4_dev_set_link_up(struct rte_eth_dev *dev);
+void mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
+int mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq);
+void mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr);
int mlx4_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
void mlx4_stats_reset(struct rte_eth_dev *dev);
void mlx4_dev_infos_get(struct rte_eth_dev *dev,
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
+#include <rte_flow.h>
#include <rte_pci.h>
#include "mlx4.h"
+#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"
return mlx4_dev_set_link(priv, 1);
}
+/**
+ * DPDK callback to remove a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param index
+ * MAC address index.
+ */
+void
+mlx4_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
+
+ if (index >= RTE_DIM(priv->mac)) {
+ rte_errno = EINVAL;
+ return;
+ }
+ memset(&priv->mac[index], 0, sizeof(priv->mac[index]));
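+ /* An all-zero entry is treated as unused; the sync below drops its flow rule. */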
+ if (!mlx4_flow_sync(priv, &error))
+ return;
+ ERROR("failed to synchronize flow rules after removing MAC address"
+ " at index %d (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ index, rte_errno, strerror(rte_errno), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+}
+
+/**
+ * DPDK callback to add a MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ * @param index
+ * MAC address index.
+ * @param vmdq
+ * VMDq pool index to associate address with (ignored).
+ *
+ * @return
+ * 0 on success, negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint32_t index, uint32_t vmdq)
+{
+ struct priv *priv = dev->data->dev_private;
+ struct rte_flow_error error;
+ int ret;
+
+ (void)vmdq;
+ if (index >= RTE_DIM(priv->mac)) {
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ memcpy(&priv->mac[index], mac_addr, sizeof(priv->mac[index]));
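+ /* Re-synchronize flow rules so a rule matching the new address is created. */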
+ ret = mlx4_flow_sync(priv, &error);
+ if (!ret)
+ return 0;
+ ERROR("failed to synchronize flow rules after adding MAC address"
+ " at index %d (code %d, \"%s\"),"
+ " flow error type %d, cause %p, message: %s",
+ index, rte_errno, strerror(rte_errno), error.type, error.cause,
+ error.message ? error.message : "(unspecified)");
+ return ret;
+}
+
+/**
+ * DPDK callback to set the primary MAC address.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param mac_addr
+ * MAC address to register.
+ */
+void
+mlx4_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
+{
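+ /* The primary MAC address is stored at index 0 of the array. */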
+ mlx4_mac_addr_add(dev, mac_addr, 0, 0);
+}
+
/**
* DPDK callback to get information about the device.
*
max = 65535;
info->max_rx_queues = max;
info->max_tx_queues = max;
- /* Last array entry is reserved for broadcast. */
- info->max_mac_addrs = 1;
+ info->max_mac_addrs = RTE_DIM(priv->mac);
info->rx_offload_capa = 0;
info->tx_offload_capa = 0;
if (mlx4_get_ifname(priv, &ifname) == 0)
#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev.h>
+#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
/**
* Generate internal flow rules.
*
+ * - MAC flow rules are generated from @p dev->data->mac_addrs
+ * (@p priv->mac array).
+ * - An additional flow rule for Ethernet broadcasts is also generated.
+ *
* @param priv
* Pointer to private structure.
* @param[out] error
.priority = MLX4_FLOW_PRIORITY_LAST,
.ingress = 1,
};
+ struct rte_flow_item_eth eth_spec;
+ const struct rte_flow_item_eth eth_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ };
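+ /* eth_spec.dst is filled through rule_mac before each rule is created. */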
struct rte_flow_item pattern[] = {
{
.type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
},
{
.type = RTE_FLOW_ITEM_TYPE_ETH,
- .spec = &(struct rte_flow_item_eth){
- .dst = priv->mac,
- },
- .mask = &(struct rte_flow_item_eth){
- .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
- },
+ .spec = &eth_spec,
+ .mask = &eth_mask,
},
{
.type = RTE_FLOW_ITEM_TYPE_END,
.type = RTE_FLOW_ACTION_TYPE_END,
},
};
+ struct ether_addr *rule_mac = &eth_spec.dst;
+ struct rte_flow *flow;
+ unsigned int i;
+ int err = 0;
- if (!mlx4_flow_create(priv->dev, &attr, pattern, actions, error))
- return -rte_errno;
- return 0;
+ for (i = 0; i != RTE_DIM(priv->mac) + 1; ++i) {
+ const struct ether_addr *mac;
+
+ /* Broadcasts are handled by an extra iteration. */
+ if (i < RTE_DIM(priv->mac))
+ mac = &priv->mac[i];
+ else
+ mac = &eth_mask.dst;
+ if (is_zero_ether_addr(mac))
+ continue;
+ /* Check if MAC flow rule is already present. */
+ for (flow = LIST_FIRST(&priv->flows);
+ flow && flow->internal;
+ flow = LIST_NEXT(flow, next)) {
+ const struct ibv_flow_spec_eth *eth =
+ (const void *)((uintptr_t)flow->ibv_attr +
+ sizeof(*flow->ibv_attr));
+ unsigned int j;
+
+ if (!flow->mac)
+ continue;
+ assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
+ assert(flow->ibv_attr->num_of_specs == 1);
+ assert(eth->type == IBV_FLOW_SPEC_ETH);
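+ /* Destination must equal mac under a full mask; source must not be matched. */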
+ for (j = 0; j != sizeof(mac->addr_bytes); ++j)
+ if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
+ eth->mask.dst_mac[j] != UINT8_C(0xff) ||
+ eth->val.src_mac[j] != UINT8_C(0x00) ||
+ eth->mask.src_mac[j] != UINT8_C(0x00))
+ break;
+ if (j == sizeof(mac->addr_bytes))
+ break;
+ }
+ if (!flow || !flow->internal) {
+ /* Not found, create a new flow rule. */
+ memcpy(rule_mac, mac, sizeof(*mac));
+ flow = mlx4_flow_create(priv->dev, &attr, pattern,
+ actions, error);
+ if (!flow) {
+ err = -rte_errno;
+ break;
+ }
+ }
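+ /* Mark the rule so the cleanup pass below keeps it. */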
+ flow->select = 1;
+ flow->mac = 1;
+ }
+ /* Clear selection and clean up stale MAC flow rules. */
+ flow = LIST_FIRST(&priv->flows);
+ while (flow && flow->internal) {
+ struct rte_flow *next = LIST_NEXT(flow, next);
+
+ if (flow->mac && !flow->select)
+ claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
+ else
+ flow->select = 0;
+ flow = next;
+ }
+ return err;
}
/**
flow && flow->internal;
flow = LIST_FIRST(&priv->flows))
claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
- } else if (!LIST_FIRST(&priv->flows) ||
- !LIST_FIRST(&priv->flows)->internal) {
- /*
- * If the first rule is not internal outside isolated mode,
- * they must be added back.
- */
+ } else {
+ /* Refresh internal rules. */
ret = mlx4_flow_internal(priv, error);
if (ret)
return ret;
struct ibv_flow *ibv_flow; /**< Verbs flow. */
struct ibv_flow_attr *ibv_attr; /**< Pointer to Verbs attributes. */
uint32_t ibv_attr_size; /**< Size of Verbs attributes. */
+ uint32_t select:1; /**< Used by operations on the linked list. */
uint32_t internal:1; /**< Internal flow rule outside isolated mode. */
+ uint32_t mac:1; /**< Rule associated with a configured MAC address. */
uint32_t promisc:1; /**< This rule matches everything. */
uint32_t drop:1; /**< This rule drops packets. */
uint32_t queue:1; /**< Target is a receive queue. */