diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 9cafe65fd9..54788cf8fc 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -39,6 +39,7 @@
 #include <rte_tcp.h>
 #include <rte_udp.h>
 #include <rte_ip.h>
+#include <rte_ip_frag.h>
 #include <rte_devargs.h>
 #include <rte_kvargs.h>
 #include <rte_dev.h>
@@ -170,6 +171,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 		 * mode and packet address does not match. */
 		if (unlikely(hdr->ether_type == ether_type_slow_be ||
 			!collecting || (!promisc &&
+				!is_multicast_ether_addr(&hdr->d_addr) &&
 				!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
 
 			if (hdr->ether_type == ether_type_slow_be) {
@@ -479,7 +481,7 @@ ether_hash(struct ether_hdr *eth_hdr)
 static inline uint32_t
 ipv4_hash(struct ipv4_hdr *ipv4_hdr)
 {
-	return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
+	return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
 }
 
 static inline uint32_t
@@ -552,17 +554,20 @@ xmit_l34_hash(const struct rte_mbuf *buf, uint8_t slave_count)
 
 		l3hash = ipv4_hash(ipv4_hdr);
 
-		ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
-				IPV4_IHL_MULTIPLIER;
-
-		if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
-			tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
-					ip_hdr_offset);
-			l4hash = HASH_L4_PORTS(tcp_hdr);
-		} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
-			udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
-					ip_hdr_offset);
-			l4hash = HASH_L4_PORTS(udp_hdr);
+		/* there is no L4 header in fragmented packet */
+		if (likely(rte_ipv4_frag_pkt_is_fragmented(ipv4_hdr) == 0)) {
+			ip_hdr_offset = (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
+					IPV4_IHL_MULTIPLIER;
+
+			if (ipv4_hdr->next_proto_id == IPPROTO_TCP) {
+				tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
+						ip_hdr_offset);
+				l4hash = HASH_L4_PORTS(tcp_hdr);
+			} else if (ipv4_hdr->next_proto_id == IPPROTO_UDP) {
+				udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
+						ip_hdr_offset);
+				l4hash = HASH_L4_PORTS(udp_hdr);
+			}
 		}
 	} else if (rte_cpu_to_be_16(ETHER_TYPE_IPv6) == proto) {
 		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)
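The hunk above makes the transmit hash fall back to an L3-only hash for IPv4 fragments: only the first fragment carries the TCP/UDP header, so hashing ports would scatter one flow across slaves. Below is a minimal sketch of the check this relies on, modelled on rte_ipv4_frag_pkt_is_fragmented() from the rte_ip_frag.h of this period; macro names may vary between releases.

    #include <rte_byteorder.h>
    #include <rte_ip.h>

    /* Sketch: a packet is a fragment if the fragment offset is non-zero
     * or the More-Fragments flag is set. Either way the TCP/UDP header
     * may be absent, so only the L3 addresses are safe to hash. */
    static inline int
    example_ipv4_pkt_is_fragmented(const struct ipv4_hdr *hdr)
    {
        uint16_t flag_offset = rte_be_to_cpu_16(hdr->fragment_offset);

        return (flag_offset & (IPV4_HDR_OFFSET_MASK | IPV4_HDR_MF_FLAG)) != 0;
    }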
@@ -1300,6 +1305,8 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	struct bond_rx_queue *bd_rx_q;
 	struct bond_tx_queue *bd_tx_q;
 
+	uint16_t old_nb_tx_queues = slave_eth_dev->data->nb_tx_queues;
+	uint16_t old_nb_rx_queues = slave_eth_dev->data->nb_rx_queues;
 	int errval;
 	uint16_t q_id;
 
@@ -1307,9 +1314,27 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	rte_eth_dev_stop(slave_eth_dev->data->port_id);
 
 	/* Enable interrupts on slave device if supported */
-	if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
+	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
 		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
 
+	/* If RSS is enabled for bonding, try to enable it for slaves */
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
+		if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
+				!= 0) {
+			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
+					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
+					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+		} else {
+			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
+		}
+
+		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
+				bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+		slave_eth_dev->data->dev_conf.rxmode.mq_mode =
+				bonded_eth_dev->data->dev_conf.rxmode.mq_mode;
+	}
+
 	/* Configure device */
 	errval = rte_eth_dev_configure(slave_eth_dev->data->port_id,
 			bonded_eth_dev->data->nb_rx_queues,
@@ -1322,7 +1347,9 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	/* Setup Rx Queues */
-	for (q_id = 0; q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
+	/* Use existing queues, if any */
+	for (q_id = old_nb_rx_queues;
+	     q_id < bonded_eth_dev->data->nb_rx_queues; q_id++) {
 		bd_rx_q = (struct bond_rx_queue *)bonded_eth_dev->data->rx_queues[q_id];
 
 		errval = rte_eth_rx_queue_setup(slave_eth_dev->data->port_id, q_id,
@@ -1338,7 +1365,9 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	/* Setup Tx Queues */
-	for (q_id = 0; q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
+	/* Use existing queues, if any */
+	for (q_id = old_nb_tx_queues;
+	     q_id < bonded_eth_dev->data->nb_tx_queues; q_id++) {
 		bd_tx_q = (struct bond_tx_queue *)bonded_eth_dev->data->tx_queues[q_id];
 
 		errval = rte_eth_tx_queue_setup(slave_eth_dev->data->port_id, q_id,
@@ -1361,10 +1390,34 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 		return -1;
 	}
 
+	/* If RSS is enabled for bonding, synchronize RETA */
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+		int i;
+		struct bond_dev_private *internals;
+
+		internals = bonded_eth_dev->data->dev_private;
+
+		for (i = 0; i < internals->slave_count; i++) {
+			if (internals->slaves[i].port_id == slave_eth_dev->data->port_id) {
+				errval = rte_eth_dev_rss_reta_update(
+						slave_eth_dev->data->port_id,
+						&internals->reta_conf[0],
+						internals->slaves[i].reta_size);
+				if (errval != 0) {
+					RTE_LOG(WARNING, PMD,
+							"rte_eth_dev_rss_reta_update on slave port %d fails (err %d)."
+							" RSS Configuration for bonding may be inconsistent.\n",
+							slave_eth_dev->data->port_id, errval);
+				}
+				break;
+			}
+		}
+	}
+
 	/* If lsc interrupt is set, check initial slave's link status */
-	if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC)
+	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
 		bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id,
-			RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
+				RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id);
 
 	return 0;
 }
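With the change above, enabling RSS on the bonded port is enough for slave_configure() to push the RSS configuration down to every slave and align each slave's RETA with the bond's. A hedged application-side sketch of a configuration that exercises this path; the port number and queue counts are illustrative.

    #include <rte_ethdev.h>

    static const struct rte_eth_conf bond_port_conf = {
        .rxmode = {
            .mq_mode = ETH_MQ_RX_RSS,
        },
        .rx_adv_conf = {
            .rss_conf = {
                .rss_key = NULL,  /* NULL: fall back to the driver's default key */
                .rss_hf  = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP,
            },
        },
    };

    /* Configure the bonded port; the PMD then propagates mq_mode,
     * rss_hf, and the key to each slave when it is (re)configured. */
    static int
    configure_bonded_port(uint8_t bond_port, uint16_t nb_queues)
    {
        return rte_eth_dev_configure(bond_port, nb_queues, nb_queues,
                &bond_port_conf);
    }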
+ " RSS Configuration for bonding may be inconsistent.\n", + slave_eth_dev->data->port_id, errval); + } + break; + } + } + } + /* If lsc interrupt is set, check initial slave's link status */ - if (slave_eth_dev->driver->pci_drv.drv_flags & RTE_PCI_DRV_INTR_LSC) + if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) bond_ethdev_lsc_event_callback(slave_eth_dev->data->port_id, - RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id); + RTE_ETH_EVENT_INTR_LSC, &bonded_eth_dev->data->port_id); return 0; } @@ -1401,18 +1454,11 @@ slave_add(struct bond_dev_private *internals, slave_details->port_id = slave_eth_dev->data->port_id; slave_details->last_link_status = 0; - /* If slave device doesn't support interrupts then we need to enabled - * polling to monitor link status */ - if (!(slave_eth_dev->pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) { + /* Mark slave devices that don't support interrupts so we can + * compensate when we start the bond + */ + if (!(slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) { slave_details->link_status_poll_enabled = 1; - - if (!internals->link_status_polling_enabled) { - internals->link_status_polling_enabled = 1; - - rte_eal_alarm_set(internals->link_status_polling_interval_ms * 1000, - bond_ethdev_slave_link_status_change_monitor, - (void *)&rte_eth_devices[internals->port_id]); - } } slave_details->link_status_wait_to_complete = 0; @@ -1447,13 +1493,13 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) int i; /* slave eth dev will be started by bonded device */ - if (valid_bonded_ethdev(eth_dev)) { + if (check_for_bonded_ethdev(eth_dev)) { RTE_BOND_LOG(ERR, "User tried to explicitly start a slave eth_dev (%d)", eth_dev->data->port_id); return -1; } - eth_dev->data->dev_link.link_status = 0; + eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; eth_dev->data->dev_started = 1; internals = eth_dev->data->dev_private; @@ -1497,6 +1543,18 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) eth_dev->data->port_id, internals->slaves[i].port_id); return -1; } + /* We will need to poll for link status if any slave doesn't + * support interrupts + */ + if (internals->slaves[i].link_status_poll_enabled) + internals->link_status_polling_enabled = 1; + } + /* start polling if needed */ + if (internals->link_status_polling_enabled) { + rte_eal_alarm_set( + internals->link_status_polling_interval_ms * 1000, + bond_ethdev_slave_link_status_change_monitor, + (void *)&rte_eth_devices[internals->port_id]); } if (internals->user_defined_primary_port) @@ -1513,6 +1571,28 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev) } static void +bond_ethdev_free_queues(struct rte_eth_dev *dev) +{ + uint8_t i; + + if (dev->data->rx_queues != NULL) { + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rte_free(dev->data->rx_queues[i]); + dev->data->rx_queues[i] = NULL; + } + dev->data->nb_rx_queues = 0; + } + + if (dev->data->tx_queues != NULL) { + for (i = 0; i < dev->data->nb_tx_queues; i++) { + rte_free(dev->data->tx_queues[i]); + dev->data->tx_queues[i] = NULL; + } + dev->data->nb_tx_queues = 0; + } +} + +void bond_ethdev_stop(struct rte_eth_dev *eth_dev) { struct bond_dev_private *internals = eth_dev->data->dev_private; @@ -1547,14 +1627,17 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev) internals->active_slave_count = 0; internals->link_status_polling_enabled = 0; + for (i = 0; i < internals->slave_count; i++) + internals->slaves[i].last_link_status = 0; - eth_dev->data->dev_link.link_status = 0; + eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; 
@@ -1573,10 +1656,13 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_tx_queues = (uint16_t)512;
 
 	dev_info->min_rx_bufsize = 0;
-	dev_info->pci_dev = dev->pci_dev;
+	dev_info->pci_dev = NULL;
 
 	dev_info->rx_offload_capa = internals->rx_offload_capa;
 	dev_info->tx_offload_capa = internals->tx_offload_capa;
+	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
+
+	dev_info->reta_size = internals->reta_size;
 }
 
 static int
@@ -1586,7 +1672,7 @@ bond_ethdev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 {
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)
 			rte_zmalloc_socket(NULL, sizeof(struct bond_rx_queue),
-					0, dev->pci_dev->numa_node);
+					0, dev->data->numa_node);
 	if (bd_rx_q == NULL)
 		return -1;
 
@@ -1610,7 +1696,7 @@ bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 {
 	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)
 			rte_zmalloc_socket(NULL, sizeof(struct bond_tx_queue),
-					0, dev->pci_dev->numa_node);
+					0, dev->data->numa_node);
 	if (bd_tx_q == NULL)
 		return -1;
 
@@ -1710,7 +1796,7 @@ bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
 
 	if (!bonded_eth_dev->data->dev_started ||
 		internals->active_slave_count == 0) {
-		bonded_eth_dev->data->dev_link.link_status = 0;
+		bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
 		return 0;
 	} else {
 		struct rte_eth_dev *slave_eth_dev;
@@ -1721,7 +1807,7 @@ bond_ethdev_link_update(struct rte_eth_dev *bonded_eth_dev,
 			(*slave_eth_dev->dev_ops->link_update)(slave_eth_dev,
 					wait_to_complete);
 
-			if (slave_eth_dev->data->dev_link.link_status == 1) {
+			if (slave_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
 				link_up = 1;
 				break;
 			}
@@ -1738,7 +1824,7 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	struct bond_dev_private *internals = dev->data->dev_private;
 	struct rte_eth_stats slave_stats;
-	int i;
+	int i, j;
 
 	for (i = 0; i < internals->slave_count; i++) {
 		rte_eth_stats_get(internals->slaves[i].port_id, &slave_stats);
@@ -1747,16 +1833,20 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		stats->opackets += slave_stats.opackets;
 		stats->ibytes += slave_stats.ibytes;
 		stats->obytes += slave_stats.obytes;
+		stats->imissed += slave_stats.imissed;
 		stats->ierrors += slave_stats.ierrors;
 		stats->oerrors += slave_stats.oerrors;
 		stats->imcasts += slave_stats.imcasts;
 		stats->rx_nombuf += slave_stats.rx_nombuf;
-		stats->fdirmatch += slave_stats.fdirmatch;
-		stats->fdirmiss += slave_stats.fdirmiss;
-		stats->tx_pause_xon += slave_stats.tx_pause_xon;
-		stats->rx_pause_xon += slave_stats.rx_pause_xon;
-		stats->tx_pause_xoff += slave_stats.tx_pause_xoff;
-		stats->rx_pause_xoff += slave_stats.rx_pause_xoff;
+
+		for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
+			stats->q_ipackets[j] += slave_stats.q_ipackets[j];
+			stats->q_opackets[j] += slave_stats.q_opackets[j];
+			stats->q_ibytes[j] += slave_stats.q_ibytes[j];
+			stats->q_obytes[j] += slave_stats.q_obytes[j];
+			stats->q_errors[j] += slave_stats.q_errors[j];
+		}
+
 	}
 }
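After the stats hunk above, a single rte_eth_stats_get() call on the bonded port also reports imissed and the per-queue counters summed over all slaves. A small caller-side sketch; the port number is illustrative.

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_bond_stats(uint8_t bond_port)
    {
        struct rte_eth_stats stats;
        int j;

        rte_eth_stats_get(bond_port, &stats);
        printf("rx %"PRIu64" pkts, missed %"PRIu64"\n",
                stats.ipackets, stats.imissed);
        /* per-queue counters are now aggregated across slaves */
        for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++)
            printf("  q%d: %"PRIu64" pkts\n", j, stats.q_ipackets[j]);
    }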
@@ -1854,7 +1944,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
 	bonded_eth_dev = &rte_eth_devices[*(uint8_t *)param];
 	slave_eth_dev = &rte_eth_devices[port_id];
 
-	if (valid_bonded_ethdev(bonded_eth_dev))
+	if (check_for_bonded_ethdev(bonded_eth_dev))
 		return;
 
 	internals = bonded_eth_dev->data->dev_private;
@@ -1886,7 +1976,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
 		/* if no active slave ports then set this port to be primary port */
 		if (internals->active_slave_count < 1) {
 			/* If first active slave, then change link status */
-			bonded_eth_dev->data->dev_link.link_status = 1;
+			bonded_eth_dev->data->dev_link.link_status = ETH_LINK_UP;
 			internals->current_primary_port = port_id;
 
 			lsc_flag = 1;
@@ -1914,7 +2004,7 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
 			 * link properties */
 			if (internals->active_slave_count < 1) {
 				lsc_flag = 1;
-				bonded_eth_dev->data->dev_link.link_status = 0;
+				bonded_eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
 
 				link_properties_reset(bonded_eth_dev);
 			}
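These callbacks fire because the bonding driver registers bond_ethdev_lsc_event_callback() on each slave through the standard ethdev callback API; an application can watch the bonded port the same way. A sketch with an illustrative handler, using the callback signature of this era.

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    example_lsc_handler(uint8_t port_id, enum rte_eth_event_type type,
            void *param)
    {
        (void)type;
        (void)param;
        printf("link state changed on port %u\n", port_id);
    }

    /* Register for link-state-change events on the bonded port */
    static void
    watch_bond_link(uint8_t bond_port)
    {
        rte_eth_dev_callback_register(bond_port, RTE_ETH_EVENT_INTR_LSC,
                example_lsc_handler, NULL);
    }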
@@ -1958,21 +2048,132 @@ bond_ethdev_lsc_event_callback(uint8_t port_id, enum rte_eth_event_type type,
 	}
 }
 
-struct eth_dev_ops default_dev_ops = {
-		.dev_start = bond_ethdev_start,
-		.dev_stop = bond_ethdev_stop,
-		.dev_close = bond_ethdev_close,
-		.dev_configure = bond_ethdev_configure,
-		.dev_infos_get = bond_ethdev_info,
-		.rx_queue_setup = bond_ethdev_rx_queue_setup,
-		.tx_queue_setup = bond_ethdev_tx_queue_setup,
-		.rx_queue_release = bond_ethdev_rx_queue_release,
-		.tx_queue_release = bond_ethdev_tx_queue_release,
-		.link_update = bond_ethdev_link_update,
-		.stats_get = bond_ethdev_stats_get,
-		.stats_reset = bond_ethdev_stats_reset,
-		.promiscuous_enable = bond_ethdev_promiscuous_enable,
-		.promiscuous_disable = bond_ethdev_promiscuous_disable
+static int
+bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
+		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
+{
+	unsigned i, j;
+	int result = 0;
+	int slave_reta_size;
+	unsigned reta_count;
+	struct bond_dev_private *internals = dev->data->dev_private;
+
+	if (reta_size != internals->reta_size)
+		return -EINVAL;
+
+	/* Copy RETA table */
+	reta_count = reta_size / RTE_RETA_GROUP_SIZE;
+
+	for (i = 0; i < reta_count; i++) {
+		internals->reta_conf[i].mask = reta_conf[i].mask;
+		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+			if ((reta_conf[i].mask >> j) & 0x01)
+				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
+	}
+
+	/* Fill rest of array */
+	for (; i < RTE_DIM(internals->reta_conf); i += reta_count)
+		memcpy(&internals->reta_conf[i], &internals->reta_conf[0],
+				sizeof(internals->reta_conf[0]) * reta_count);
+
+	/* Propagate RETA over slaves */
+	for (i = 0; i < internals->slave_count; i++) {
+		slave_reta_size = internals->slaves[i].reta_size;
+		result = rte_eth_dev_rss_reta_update(internals->slaves[i].port_id,
+				&internals->reta_conf[0], slave_reta_size);
+		if (result < 0)
+			return result;
+	}
+
+	return 0;
+}
+
+static int
+bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
+		struct rte_eth_rss_reta_entry64 *reta_conf, uint16_t reta_size)
+{
+	int i, j;
+	struct bond_dev_private *internals = dev->data->dev_private;
+
+	if (reta_size != internals->reta_size)
+		return -EINVAL;
+
+	/* Copy RETA table */
+	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
+		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+			if ((reta_conf[i].mask >> j) & 0x01)
+				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
+
+	return 0;
+}
+
+static int
+bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
+		struct rte_eth_rss_conf *rss_conf)
+{
+	int i, result = 0;
+	struct bond_dev_private *internals = dev->data->dev_private;
+	struct rte_eth_rss_conf bond_rss_conf;
+
+	memcpy(&bond_rss_conf, rss_conf, sizeof(struct rte_eth_rss_conf));
+
+	bond_rss_conf.rss_hf &= internals->flow_type_rss_offloads;
+
+	if (bond_rss_conf.rss_hf != 0)
+		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
+
+	if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
+			sizeof(internals->rss_key)) {
+		if (bond_rss_conf.rss_key_len == 0)
+			bond_rss_conf.rss_key_len = 40;
+		internals->rss_key_len = bond_rss_conf.rss_key_len;
+		memcpy(internals->rss_key, bond_rss_conf.rss_key,
+				internals->rss_key_len);
+	}
+
+	for (i = 0; i < internals->slave_count; i++) {
+		result = rte_eth_dev_rss_hash_update(internals->slaves[i].port_id,
+				&bond_rss_conf);
+		if (result < 0)
+			return result;
+	}
+
+	return 0;
+}
+
+static int
+bond_ethdev_rss_hash_conf_get(struct rte_eth_dev *dev,
+		struct rte_eth_rss_conf *rss_conf)
+{
+	struct bond_dev_private *internals = dev->data->dev_private;
+
+	rss_conf->rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
+	rss_conf->rss_key_len = internals->rss_key_len;
+	if (rss_conf->rss_key)
+		memcpy(rss_conf->rss_key, internals->rss_key, internals->rss_key_len);
+
+	return 0;
+}
+
+const struct eth_dev_ops default_dev_ops = {
+		.dev_start = bond_ethdev_start,
+		.dev_stop = bond_ethdev_stop,
+		.dev_close = bond_ethdev_close,
+		.dev_configure = bond_ethdev_configure,
+		.dev_infos_get = bond_ethdev_info,
+		.rx_queue_setup = bond_ethdev_rx_queue_setup,
+		.tx_queue_setup = bond_ethdev_tx_queue_setup,
+		.rx_queue_release = bond_ethdev_rx_queue_release,
+		.tx_queue_release = bond_ethdev_tx_queue_release,
+		.link_update = bond_ethdev_link_update,
+		.stats_get = bond_ethdev_stats_get,
+		.stats_reset = bond_ethdev_stats_reset,
+		.promiscuous_enable = bond_ethdev_promiscuous_enable,
+		.promiscuous_disable = bond_ethdev_promiscuous_disable,
+		.reta_update = bond_ethdev_rss_reta_update,
+		.reta_query = bond_ethdev_rss_reta_query,
+		.rss_hash_update = bond_ethdev_rss_hash_update,
+		.rss_hash_conf_get = bond_ethdev_rss_hash_conf_get
 };
 
 static int
@@ -2042,6 +2243,24 @@ parse_error:
 	return -1;
 }
 
+static int
+bond_uninit(const char *name)
+{
+	int ret;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, EAL, "Uninitializing pmd_bond for %s\n", name);
+
+	/* free link bonding eth device */
+	ret = rte_eth_bond_free(name);
+	if (ret < 0)
+		RTE_LOG(ERR, EAL, "Failed to free %s\n", name);
+
+	return ret;
+}
+
 /* this part will resolve the slave portids after all the other pdev and vdev
  * have been allocated */
 static int
@@ -2053,6 +2272,28 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 	int arg_count;
 	uint8_t port_id = dev - rte_eth_devices;
 
+	static const uint8_t default_rss_key[40] = {
+		0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2, 0x41, 0x67, 0x25, 0x3D,
+		0x43, 0xA3, 0x8F, 0xB0, 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+		0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C, 0x6A, 0x42, 0xB7, 0x3B,
+		0xBE, 0xAC, 0x01, 0xFA
+	};
+
+	unsigned i, j;
+
+	/* If RSS is enabled, fill table and key with default values */
+	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
+		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
+		memcpy(internals->rss_key, default_rss_key, 40);
+
+		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
+			internals->reta_conf[i].mask = ~0LL;
+			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+				internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
+		}
+	}
+
 	/*
 	 * if no kvlist, it means that this bonded device has been created
 	 * through the bonding api.
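bond_ethdev_configure() above seeds a default RSS key and an even RETA spread; applications can then reshape the table through the new reta_update op. A hedged sketch that spreads all traffic across two queues; the queue choice, local table bound, and error value are illustrative.

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    spread_over_two_queues(uint8_t bond_port)
    {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rss_reta_entry64 reta_conf[8];  /* up to 512 entries */
        unsigned i, j, idx = 0;

        rte_eth_dev_info_get(bond_port, &dev_info);
        if (dev_info.reta_size > 8 * RTE_RETA_GROUP_SIZE)
            return -1;  /* table larger than this sketch handles */

        memset(reta_conf, 0, sizeof(reta_conf));
        for (i = 0; i < dev_info.reta_size / RTE_RETA_GROUP_SIZE; i++) {
            reta_conf[i].mask = ~0ULL;  /* update every entry in the group */
            for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
                reta_conf[i].reta[j] = idx++ % 2;  /* queues 0 and 1 */
        }

        /* the bonding PMD copies this table and pushes it to each slave */
        return rte_eth_dev_rss_reta_update(bond_port, reta_conf,
                dev_info.reta_size);
    }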
@@ -2268,6 +2509,7 @@ static struct rte_driver bond_drv = {
 	.name = "eth_bond",
 	.type = PMD_VDEV,
 	.init = bond_init,
+	.uninit = bond_uninit,
 };
 
 PMD_REGISTER_DRIVER(bond_drv);
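The new .uninit hook makes vdev teardown symmetric with creation; rte_eth_bond_free(), which bond_uninit() calls above, is also the API-level counterpart for bonds created at runtime. A sketch of the create/free pair; the device name, mode, and socket are illustrative.

    #include <rte_eth_bond.h>

    static int
    create_and_free_bond(void)
    {
        int port_id;

        port_id = rte_eth_bond_create("eth_bond_test", BONDING_MODE_BALANCE, 0);
        if (port_id < 0)
            return port_id;

        /* ... add slaves, start, use, then stop the bonded port ... */

        /* releases the eth_dev and, after this patch, its queues too */
        return rte_eth_bond_free("eth_bond_test");
    }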