/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#define DEFAULT_POLLING_INTERVAL_10_MS (10)
+const char pmd_bond_driver_name[] = "rte_bond_pmd";
+
int
-valid_bonded_ethdev(struct rte_eth_dev *eth_dev)
+check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev)
{
- size_t len;
-
/* Check valid pointer */
- if (eth_dev->driver->pci_drv.name == NULL || driver_name == NULL)
+ if (eth_dev->data->drv_name == NULL)
return -1;
- /* Check string lengths are equal */
- len = strlen(driver_name);
- if (strlen(eth_dev->driver->pci_drv.name) != len)
- return -1;
-
- /* Compare strings */
- return strncmp(eth_dev->driver->pci_drv.name, driver_name, len);
-}
-
-int
-valid_port_id(uint8_t port_id)
-{
- /* Verify that port id is valid */
- int ethdev_count = rte_eth_dev_count();
- if (port_id >= ethdev_count) {
- RTE_BOND_LOG(ERR, "Port Id %d is greater than rte_eth_dev_count %d",
- port_id, ethdev_count);
- return -1;
- }
-
- return 0;
+ /* return 0 if driver name matches */
+ return eth_dev->data->drv_name != pmd_bond_driver_name;
}
int
valid_bonded_port_id(uint8_t port_id)
{
- /* Verify that port id's are valid */
- if (valid_port_id(port_id))
+ if (!rte_eth_dev_is_valid_port(port_id))
return -1;
- /* Verify that bonded_port_id refers to a bonded port */
- if (valid_bonded_ethdev(&rte_eth_devices[port_id])) {
- RTE_BOND_LOG(ERR, "Specified port Id %d is not a bonded eth_dev device",
- port_id);
- return -1;
- }
-
- return 0;
+ return check_for_bonded_ethdev(&rte_eth_devices[port_id]);
}
int
valid_slave_port_id(uint8_t port_id)
{
/* Verify that port id's are valid */
- if (valid_port_id(port_id))
+ if (!rte_eth_dev_is_valid_port(port_id))
return -1;
/* Verify that port_id refers to a non bonded port */
- if (!valid_bonded_ethdev(&rte_eth_devices[port_id]))
+ if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0)
return -1;
return 0;
return ++sockets;
}
-const char *driver_name = "Link Bonding PMD";
-
int
rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id)
{
- struct rte_pci_device *pci_dev = NULL;
struct bond_dev_private *internals = NULL;
struct rte_eth_dev *eth_dev = NULL;
- struct eth_driver *eth_drv = NULL;
- struct rte_pci_driver *pci_drv = NULL;
- struct rte_pci_id *pci_id_table = NULL;
+
/* now do all data allocation - for eth_dev structure, dummy pci driver
* and internal (private) data
*/
goto err;
}
- pci_dev = rte_zmalloc_socket(name, sizeof(*pci_dev), 0, socket_id);
- if (pci_dev == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc pci dev on socket");
- goto err;
- }
-
- eth_drv = rte_zmalloc_socket(name, sizeof(*eth_drv), 0, socket_id);
- if (eth_drv == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc eth_drv on socket");
- goto err;
- }
-
- pci_drv = &eth_drv->pci_drv;
-
- pci_id_table = rte_zmalloc_socket(name, sizeof(*pci_id_table), 0, socket_id);
- if (pci_id_table == NULL) {
- RTE_BOND_LOG(ERR, "Unable to malloc pci_id_table on socket");
- goto err;
- }
- pci_id_table->device_id = PCI_ANY_ID;
- pci_id_table->subsystem_device_id = PCI_ANY_ID;
- pci_id_table->vendor_id = PCI_ANY_ID;
- pci_id_table->subsystem_vendor_id = PCI_ANY_ID;
-
- pci_drv->id_table = pci_id_table;
- pci_drv->drv_flags = RTE_PCI_DRV_INTR_LSC;
-
internals = rte_zmalloc_socket(name, sizeof(*internals), 0, socket_id);
if (internals == NULL) {
RTE_BOND_LOG(ERR, "Unable to malloc internals on socket");
goto err;
}
- pci_dev->numa_node = socket_id;
- pci_drv->name = driver_name;
- pci_dev->driver = pci_drv;
-
- eth_dev->driver = eth_drv;
eth_dev->data->dev_private = internals;
eth_dev->data->nb_rx_queues = (uint16_t)1;
eth_dev->data->nb_tx_queues = (uint16_t)1;
TAILQ_INIT(&(eth_dev->link_intr_cbs));
- eth_dev->data->dev_link.link_status = 0;
+ eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
eth_dev->data->mac_addrs = rte_zmalloc_socket(name, ETHER_ADDR_LEN, 0,
socket_id);
+ if (eth_dev->data->mac_addrs == NULL) {
+ RTE_BOND_LOG(ERR, "Unable to malloc mac_addrs");
+ goto err;
+ }
eth_dev->data->dev_started = 0;
eth_dev->data->promiscuous = 0;
eth_dev->data->all_multicast = 0;
eth_dev->dev_ops = &default_dev_ops;
- eth_dev->pci_dev = pci_dev;
+ eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+ RTE_ETH_DEV_DETACHABLE;
+ eth_dev->driver = NULL;
+ eth_dev->data->kdrv = RTE_KDRV_NONE;
+ eth_dev->data->drv_name = pmd_bond_driver_name;
+ eth_dev->data->numa_node = socket_id;
rte_spinlock_init(&internals->lock);
internals->port_id = eth_dev->data->port_id;
internals->mode = BONDING_MODE_INVALID;
- internals->current_primary_port = 0;
+ internals->current_primary_port = RTE_MAX_ETHPORTS + 1;
internals->balance_xmit_policy = BALANCE_XMIT_POLICY_LAYER2;
internals->xmit_hash = xmit_l2_hash;
internals->user_defined_mac = 0;
internals->rx_offload_capa = 0;
internals->tx_offload_capa = 0;
+ /* Initially allow to choose any offload type */
+ internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+
memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
memset(internals->slaves, 0, sizeof(internals->slaves));
return eth_dev->data->port_id;
err:
- rte_free(pci_dev);
- rte_free(pci_id_table);
- rte_free(eth_drv);
rte_free(internals);
-
+ if (eth_dev != NULL) {
+ rte_free(eth_dev->data->mac_addrs);
+ rte_eth_dev_release_port(eth_dev);
+ }
return -1;
}
+int
+rte_eth_bond_free(const char *name)
+{
+ struct rte_eth_dev *eth_dev = NULL;
+ struct bond_dev_private *internals;
+
+ /* now free all data allocation - for eth_dev structure,
+ * dummy pci driver and internal (private) data
+ */
+
+ /* find an ethdev entry */
+ eth_dev = rte_eth_dev_allocated(name);
+ if (eth_dev == NULL)
+ return -ENODEV;
+
+ internals = eth_dev->data->dev_private;
+ if (internals->slave_count != 0)
+ return -EBUSY;
+
+ if (eth_dev->data->dev_started == 1) {
+ bond_ethdev_stop(eth_dev);
+ bond_ethdev_close(eth_dev);
+ }
+
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+
+ rte_free(eth_dev->data->dev_private);
+ rte_free(eth_dev->data->mac_addrs);
+
+ rte_eth_dev_release_port(eth_dev);
+
+ return 0;
+}
+
static int
__eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id)
{
struct rte_eth_dev *bonded_eth_dev, *slave_eth_dev;
struct bond_dev_private *internals;
- struct bond_dev_private *temp_internals;
struct rte_eth_link link_props;
struct rte_eth_dev_info dev_info;
- int i, j;
-
if (valid_slave_port_id(slave_port_id) != 0)
return -1;
bonded_eth_dev = &rte_eth_devices[bonded_port_id];
internals = bonded_eth_dev->data->dev_private;
- /* Verify that new slave device is not already a slave of another
- * bonded device */
- for (i = rte_eth_dev_count()-1; i >= 0; i--) {
- if (valid_bonded_ethdev(&rte_eth_devices[i]) == 0) {
- temp_internals = rte_eth_devices[i].data->dev_private;
-
- for (j = 0; j < temp_internals->slave_count; j++) {
- /* Device already a slave of a bonded device */
- if (temp_internals->slaves[j].port_id == slave_port_id) {
- RTE_BOND_LOG(ERR, "Slave port %d is already a slave",
- slave_port_id);
- return -1;
- }
- }
- }
- }
-
slave_eth_dev = &rte_eth_devices[slave_port_id];
+ if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_BONDED_SLAVE) {
+ RTE_BOND_LOG(ERR, "Slave device is already a slave of a bonded device");
+ return -1;
+ }
/* Add slave details to bonded device */
+ slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE;
slave_add(internals, slave_eth_dev);
rte_eth_dev_info_get(slave_port_id, &dev_info);
+ /* We need to store slaves reta_size to be able to synchronize RETA for all
+ * slave devices even if its sizes are different.
+ */
+ internals->slaves[internals->slave_count].reta_size = dev_info.reta_size;
+
if (internals->slave_count < 1) {
/* if MAC is not user defined then use MAC of first slave add to
* bonded device */
/* Make primary slave */
internals->primary_port = slave_port_id;
+ internals->current_primary_port = slave_port_id;
+
+ /* Inherit queues settings from first slave */
+ internals->nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
+ internals->nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
+
+ internals->reta_size = dev_info.reta_size;
/* Take the first dev's offload capabilities */
internals->rx_offload_capa = dev_info.rx_offload_capa;
internals->tx_offload_capa = dev_info.tx_offload_capa;
+ internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads;
} else {
/* Check slave link properties are supported if props are set,
if (internals->link_props_set) {
if (link_properties_valid(&(bonded_eth_dev->data->dev_link),
&(slave_eth_dev->data->dev_link))) {
+ slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
RTE_BOND_LOG(ERR,
"Slave port %d link speed/duplex not supported",
slave_port_id);
}
internals->rx_offload_capa &= dev_info.rx_offload_capa;
internals->tx_offload_capa &= dev_info.tx_offload_capa;
+ internals->flow_type_rss_offloads &= dev_info.flow_type_rss_offloads;
+
+ /* RETA size is GCD of all slaves RETA sizes, so, if all sizes will be
+ * the power of 2, the lower one is GCD
+ */
+ if (internals->reta_size > dev_info.reta_size)
+ internals->reta_size = dev_info.reta_size;
+
}
+ bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &=
+ internals->flow_type_rss_offloads;
+
internals->slave_count++;
/* Update all slave devices MACs*/
if (bonded_eth_dev->data->dev_started) {
if (slave_configure(bonded_eth_dev, slave_eth_dev) != 0) {
+ slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
RTE_BOND_LOG(ERR, "rte_bond_slaves_configure: port=%d",
slave_port_id);
return -1;
if (bonded_eth_dev->data->dev_started) {
rte_eth_link_get_nowait(slave_port_id, &link_props);
- if (link_props.link_status == 1)
- activate_slave(bonded_eth_dev, slave_port_id);
+ if (link_props.link_status == ETH_LINK_UP) {
+ if (internals->active_slave_count == 0 &&
+ !internals->user_defined_primary_port)
+ bond_ethdev_primary_set(internals,
+ slave_port_id);
+
+ if (find_slave_by_id(internals->active_slaves,
+ internals->active_slave_count,
+ slave_port_id) == internals->active_slave_count)
+ activate_slave(bonded_eth_dev, slave_port_id);
+ }
}
return 0;
{
struct rte_eth_dev *bonded_eth_dev;
struct bond_dev_private *internals;
-
+ struct rte_eth_dev *slave_eth_dev;
int i, slave_idx;
if (valid_slave_port_id(slave_port_id) != 0)
mac_address_set(&rte_eth_devices[slave_port_id],
&(internals->slaves[slave_idx].persisted_mac_addr));
- slave_remove(internals, &rte_eth_devices[slave_port_id]);
+ slave_eth_dev = &rte_eth_devices[slave_port_id];
+ slave_remove(internals, slave_eth_dev);
+ slave_eth_dev->data->dev_flags &= (~RTE_ETH_DEV_BONDED_SLAVE);
/* first slave in the active list will be the primary by default,
* otherwise use first device in list */
if (internals->slave_count == 0) {
internals->rx_offload_capa = 0;
internals->tx_offload_capa = 0;
+ internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+ internals->reta_size = 0;
}
return 0;
}