/*-
* BSD LICENSE
*
 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
static inline uint16_t
ether_hash(struct ether_hdr *eth_hdr)
{
- uint16_t *word_src_addr = (uint16_t *)eth_hdr->s_addr.addr_bytes;
- uint16_t *word_dst_addr = (uint16_t *)eth_hdr->d_addr.addr_bytes;
+ unaligned_uint16_t *word_src_addr =
+ (unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
+ unaligned_uint16_t *word_dst_addr =
+ (unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;
return (word_src_addr[0] ^ word_dst_addr[0]) ^
(word_src_addr[1] ^ word_dst_addr[1]) ^
static inline uint32_t
ipv6_hash(struct ipv6_hdr *ipv6_hdr)
{
- uint32_t *word_src_addr = (uint32_t *)&(ipv6_hdr->src_addr[0]);
- uint32_t *word_dst_addr = (uint32_t *)&(ipv6_hdr->dst_addr[0]);
+ unaligned_uint32_t *word_src_addr =
+ (unaligned_uint32_t *)&(ipv6_hdr->src_addr[0]);
+ unaligned_uint32_t *word_dst_addr =
+ (unaligned_uint32_t *)&(ipv6_hdr->dst_addr[0]);
return (word_src_addr[0] ^ word_dst_addr[0]) ^
(word_src_addr[1] ^ word_dst_addr[1]) ^
return -1;
}
+ /* If lsc interrupt is set, check initial slave's link status */
+ if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
+ bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
+ RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
+
return 0;
}
}
static void
+bond_ethdev_free_queues(struct rte_eth_dev *dev)
+{
+ uint8_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rte_free(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ rte_free(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
+}
+
+void
bond_ethdev_stop(struct rte_eth_dev *eth_dev)
{
struct bond_dev_private *internals = eth_dev->data->dev_private;
bond_mode_8023ad_stop(eth_dev);
/* Discard all messages to/from mode 4 state machines */
- for (i = 0; i < internals->slave_count; i++) {
- port = &mode_8023ad_ports[internals->slaves[i].port_id];
+ for (i = 0; i < internals->active_slave_count; i++) {
+ port = &mode_8023ad_ports[internals->active_slaves[i]];
RTE_VERIFY(port->rx_ring != NULL);
while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
eth_dev->data->dev_started = 0;
}
/*
 * dev_ops close handler for the bonded device.
 *
 * Releases all Rx/Tx queue state; slave teardown is handled elsewhere
 * (stop/uninit paths), so this only frees what queue-setup allocated.
 */
void
bond_ethdev_close(struct rte_eth_dev *dev)
{
	bond_ethdev_free_queues(dev);
}
/* forward declaration */
{
struct bond_dev_private *internals = dev->data->dev_private;
- dev_info->driver_name = driver_name;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_pktlen = (uint32_t)2048;
{
struct bond_dev_private *internals = dev->data->dev_private;
struct rte_eth_stats slave_stats;
-
int i;
- /* clear bonded stats before populating from slaves */
- memset(stats, 0, sizeof(*stats));
-
for (i = 0; i < internals->slave_count; i++) {
rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
return -1;
}
+static int
+bond_uninit(const char *name)
+{
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
+
+ /* free link bonding eth device */
+ ret = rte_eth_bond_free(name);
+ if (ret < 0)
+ RTE_LOG(ERR, EAL, "Failed to free %s\n", name);
+
+ return ret;
+}
+
/* this part will resolve the slave portids after all the other pdev and vdev
* have been allocated */
static int
.name = "eth_bond",
.type = PMD_VDEV,
.init = bond_init,
+ .uninit = bond_uninit,
};
PMD_REGISTER_DRIVER(bond_drv);