#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_ip.h>
+#include <rte_ip_frag.h>
#include <rte_devargs.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
* mode and packet address does not match. */
if (unlikely(hdr->ether_type == ether_type_slow_be ||
!collecting || (!promisc &&
+ !is_multicast_ether_addr(&hdr->d_addr) &&
!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
if (hdr->ether_type == ether_type_slow_be) {
static inline uint32_t
ipv4_hash(struct ipv4_hdr *ipv4_hdr)
{
+	/* Fold src/dst IPv4 addresses into one word for slave selection.
+	 * XOR is symmetric, so both directions of a flow hash identically,
+	 * and the result is byte-order independent. */
- return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
+ return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
}
static inline uint32_t
l3hash = ipv4_hash(ipv4_hdr);
- ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
- IPV4_IHL_MULTIPLIER;
-
- if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
- tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
- ip_hdr_offset);
- l4hash = HASH_L4_PORTS(tcp_hdr);
- } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
- udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
- ip_hdr_offset);
- l4hash = HASH_L4_PORTS(udp_hdr);
+ /* there is no L4 header in fragmented packet */
+ if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
+ ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
+ IPV4_IHL_MULTIPLIER;
+
+ if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
+ tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
+ ip_hdr_offset);
+ l4hash = HASH_L4_PORTS(tcp_hdr);
+ } else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
+ udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
+ ip_hdr_offset);
+ l4hash = HASH_L4_PORTS(udp_hdr);
+ }
}
} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
struct bond_rx_queue *bd_rx_q;
struct bond_tx_queue *bd_tx_q;
+ uint16_t old_nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
+ uint16_t old_nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
int errval;
uint16_t q_id;
rte_eth_dev_stop(slave_eth_dev->data->port_id);
/* Enable interrupts on slave device if supported */
- if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
+ if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
/* If RSS is enabled for bonding, try to enable it for slaves */
- if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+ if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
!= 0) {
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
- slave_eth_dev->data->dev_conf.rxmode.mq_mode |= ETH_MQ_RX_RSS;
+ slave_eth_dev->data->dev_conf.rxmode.mq_mode =
+ bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
}
/* Configure device */
}
/* Setup Rx Queues */
- for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
+ /* Use existing queues, if any */
+ for (q_id = old_nb_rx_queues;
+ q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
}
/* Setup Tx Queues */
- for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
+ /* Use existing queues, if any */
+ for (q_id = old_nb_tx_queues;
+ q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
}
/* If lsc interrupt is set, check initial slave's link status */
- if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
+ if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
- RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
+ RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
return 0;
}
slave_details->port_id = slave_eth_dev->data->port_id;
slave_details->last_link_status = 0;
- /* If slave device doesn't support interrupts then we need to enabled
- * polling to monitor link status */
- if (!(slave_eth_dev->pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) {
+ /* Mark slave devices that don't support interrupts so we can
+ * compensate when we start the bond
+ */
+ if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) {
slave_details->link_status_poll_enabled = 1;
-
- if (!internals->link_status_polling_enabled) {
- internals->link_status_polling_enabled = 1;
-
- rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000,
- bond_ethdev_slave_link_status_change_monitor,
- (void *)&rte_eth_devices[internals->port_id]);
- }
}
slave_details->link_status_wait_to_complete = 0;
int i;
/* slave eth dev will be started by bonded device */
- if (valid_bonded_ethdev(eth_dev)) {
+ if (check_for_bonded_ethdev(eth_dev)) {
RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)",
eth_dev->data->port_id);
return -1;
eth_dev->data->port_id, internals->slaves[i].port_id);
return -1;
}
+ /* We will need to poll for link status if any slave doesn't
+ * support interrupts
+ */
+ if (internals->slaves[i].link_status_poll_enabled)
+ internals->link_status_polling_enabled = 1;
+ }
+ /* start polling if needed */
+ if (internals->link_status_polling_enabled) {
+ rte_eal_alarm_set(
+ internals->link_status_polling_interval_ms * 1000,
+ bond_ethdev_slave_link_status_change_monitor,
+ (void *)&rte_eth_devices[internals->port_id]);
}
if (internals->user_defined_primary_port)
{
uint8_t i;
- for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rte_free(dev->data->rx_queues[i]);
- dev->data->rx_queues[i] = NULL;
+ if (dev->data->rx_queues != NULL) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rte_free(dev->data->rx_queues[i]);
+ dev->data->rx_queues[i] = NULL;
+ }
+ dev->data->nb_rx_queues = 0;
}
- dev->data->nb_rx_queues = 0;
- for (i = 0; i < dev->data->nb_tx_queues; i++) {
- rte_free(dev->data->tx_queues[i]);
- dev->data->tx_queues[i] = NULL;
+ if (dev->data->tx_queues != NULL) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ rte_free(dev->data->tx_queues[i]);
+ dev->data->tx_queues[i] = NULL;
+ }
+ dev->data->nb_tx_queues = 0;
}
- dev->data->nb_tx_queues = 0;
}
void
internals->active_slave_count = 0;
internals->link_status_polling_enabled = 0;
+ for (i = 0; i < internals->slave_count; i++)
+ internals->slaves[i].last_link_status = 0;
eth_dev->data->dev_link.link_status = 0;
eth_dev->data->dev_started = 0;
dev_info->max_tx_queues = (uint16_t)512;
dev_info->min_rx_bufsize = 0;
- dev_info->pci_dev = dev->pci_dev;
+ dev_info->pci_dev = NULL;
dev_info->rx_offload_capa = internals->rx_offload_capa;
dev_info->tx_offload_capa = internals->tx_offload_capa;
{
struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
- 0, dev->pci_dev->numa_node);
+ 0, dev->data->numa_node);
if (bd_rx_q == NULL)
return -1;
{
struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
- 0, dev->pci_dev->numa_node);
+ 0, dev->data->numa_node);
if (bd_tx_q == NULL)
return -1;
{
struct bond_dev_private *internals = dev->data->dev_private;
struct rte_eth_stats slave_stats;
- int i;
+ int i, j;
for (i = 0; i < internals->slave_count; i++) {
rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
stats->opackets += slave_stats.opackets;
stats->ibytes += slave_stats.ibytes;
stats->obytes += slave_stats.obytes;
+ stats->imissed += slave_stats.imissed;
stats->ierrors += slave_stats.ierrors;
stats->oerrors += slave_stats.oerrors;
stats->imcasts += slave_stats.imcasts;
stats->rx_nombuf += slave_stats.rx_nombuf;
- stats->fdirmatch += slave_stats.fdirmatch;
- stats->fdirmiss += slave_stats.fdirmiss;
- stats->tx_pause_xon += slave_stats.tx_pause_xon;
- stats->rx_pause_xon += slave_stats.rx_pause_xon;
- stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
- stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
+
+ for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
+ stats->q_ipackets[j] += slave_stats.q_ipackets[j];
+ stats->q_opackets[j] += slave_stats.q_opackets[j];
+ stats->q_ibytes[j] += slave_stats.q_ibytes[j];
+ stats->q_obytes[j] += slave_stats.q_obytes[j];
+ stats->q_errors[j] += slave_stats.q_errors[j];
+ }
+
}
}
bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
slave_eth_dev = &rte_eth_devices[port_id];
- if (valid_bonded_ethdev(bonded_eth_dev))
+ if (check_for_bonded_ethdev(bonded_eth_dev))
return;
internals = bonded_eth_dev->data->dev_private;
return 0;
}
-struct eth_dev_ops default_dev_ops = {
-	.dev_start = bond_ethdev_start,
-	.dev_stop = bond_ethdev_stop,
-	.dev_close = bond_ethdev_close,
-	.dev_configure = bond_ethdev_configure,
-	.dev_infos_get = bond_ethdev_info,
-	.rx_queue_setup = bond_ethdev_rx_queue_setup,
-	.tx_queue_setup = bond_ethdev_tx_queue_setup,
-	.rx_queue_release = bond_ethdev_rx_queue_release,
-	.tx_queue_release = bond_ethdev_tx_queue_release,
-	.link_update = bond_ethdev_link_update,
-	.stats_get = bond_ethdev_stats_get,
-	.stats_reset = bond_ethdev_stats_reset,
-	.promiscuous_enable = bond_ethdev_promiscuous_enable,
-	.promiscuous_disable = bond_ethdev_promiscuous_disable,
-	.reta_update = bond_ethdev_rss_reta_update,
-	.reta_query = bond_ethdev_rss_reta_query,
-	.rss_hash_update = bond_ethdev_rss_hash_update,
-	.rss_hash_conf_get = bond_ethdev_rss_hash_conf_get
+/* ethdev driver callbacks for the bonding PMD.  Declared const so the
+ * operations table is placed in read-only data and cannot be patched
+ * at run time. */
+const struct eth_dev_ops default_dev_ops = {
+		.dev_start            = bond_ethdev_start,
+		.dev_stop             = bond_ethdev_stop,
+		.dev_close            = bond_ethdev_close,
+		.dev_configure        = bond_ethdev_configure,
+		.dev_infos_get        = bond_ethdev_info,
+		.rx_queue_setup       = bond_ethdev_rx_queue_setup,
+		.tx_queue_setup       = bond_ethdev_tx_queue_setup,
+		.rx_queue_release     = bond_ethdev_rx_queue_release,
+		.tx_queue_release     = bond_ethdev_tx_queue_release,
+		.link_update          = bond_ethdev_link_update,
+		.stats_get            = bond_ethdev_stats_get,
+		.stats_reset          = bond_ethdev_stats_reset,
+		.promiscuous_enable   = bond_ethdev_promiscuous_enable,
+		.promiscuous_disable  = bond_ethdev_promiscuous_disable,
+		.reta_update          = bond_ethdev_rss_reta_update,
+		.reta_query           = bond_ethdev_rss_reta_query,
+		.rss_hash_update      = bond_ethdev_rss_hash_update,
+		.rss_hash_conf_get    = bond_ethdev_rss_hash_conf_get
};
static int