X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbonding%2Frte_eth_bond_pmd.c;h=9bbe1291bcf8b5e9ebf64a503a734123618914cb;hb=f9f0b5121f62d8701165d20bfdd14638bc72654d;hp=060c1ddc3fa3fb72815a6677efc29f7b9503f4bd;hpb=fc1134c79283f8a3c954c32020390a22efef95e6;p=dpdk.git

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index 060c1ddc3f..9bbe1291bc 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -7,8 +7,8 @@
 #include 
 #include 
-#include 
-#include 
+#include 
+#include 
 #include 
 #include 
 #include 
@@ -21,8 +21,8 @@
 #include 
 #include 
 #include "rte_eth_bond.h"
-#include "rte_eth_bond_private.h"
-#include "rte_eth_bond_8023ad_private.h"
+#include "eth_bond_private.h"
+#include "eth_bond_8023ad_private.h"
 
 #define REORDER_PERIOD_MS 10
 #define DEFAULT_POLLING_INTERVAL_10_MS (10)
@@ -69,7 +69,7 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
 	internals = bd_rx_q->dev_private;
 	slave_count = internals->active_slave_count;
-	active_slave = internals->active_slave;
+	active_slave = bd_rx_q->active_slave;
 
 	for (i = 0; i < slave_count && nb_pkts; i++) {
 		uint16_t num_rx_slave;
@@ -86,8 +86,8 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			active_slave = 0;
 	}
 
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
+	if (++bd_rx_q->active_slave >= slave_count)
+		bd_rx_q->active_slave = 0;
 	return num_rx_total;
 }
 
@@ -112,7 +112,7 @@ is_lacp_packets(uint16_t ethertype, uint8_t subtype, struct rte_mbuf *mbuf)
 	const uint16_t ether_type_slow_be =
 		rte_be_to_cpu_16(RTE_ETHER_TYPE_SLOW);
 
-	return !((mbuf->ol_flags & PKT_RX_VLAN) ? mbuf->vlan_tci : 0) &&
+	return !((mbuf->ol_flags & RTE_MBUF_F_RX_VLAN) ? mbuf->vlan_tci : 0) &&
 		(ethertype == ether_type_slow_be &&
 		(subtype == SLOW_SUBTYPE_MARKER || subtype == SLOW_SUBTYPE_LACP));
 }
@@ -303,9 +303,9 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * slave_count);
 
-	idx = internals->active_slave;
+	idx = bd_rx_q->active_slave;
 	if (idx >= slave_count) {
-		internals->active_slave = 0;
+		bd_rx_q->active_slave = 0;
 		idx = 0;
 	}
 	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
@@ -342,11 +342,11 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
 					bufs[j])) ||
 				!collecting ||
 				(!promisc &&
-				 ((rte_is_unicast_ether_addr(&hdr->d_addr) &&
+				 ((rte_is_unicast_ether_addr(&hdr->dst_addr) &&
 				   !rte_is_same_ether_addr(bond_mac,
-						       &hdr->d_addr)) ||
+						       &hdr->dst_addr)) ||
 				  (!allmulti &&
-				   rte_is_multicast_ether_addr(&hdr->d_addr)))))) {
+				   rte_is_multicast_ether_addr(&hdr->dst_addr)))))) {
 
 				if (hdr->ether_type == ether_type_slow_be) {
 					bond_mode_8023ad_handle_slow_pkt(
@@ -367,8 +367,8 @@ rx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts,
 			idx = 0;
 	}
 
-	if (++internals->active_slave >= slave_count)
-		internals->active_slave = 0;
+	if (++bd_rx_q->active_slave >= slave_count)
+		bd_rx_q->active_slave = 0;
 
 	return num_rx_total;
 }
@@ -473,25 +473,21 @@ update_client_stats(uint32_t addr, uint16_t port, uint32_t *TXorRXindicator)
 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
 #define MODE6_DEBUG(info, src_ip, dst_ip, eth_h, arp_op, port, burstnumber) \
 	rte_log(RTE_LOG_DEBUG, bond_logtype, \
-		"%s port:%d SrcMAC:%02X:%02X:%02X:%02X:%02X:%02X SrcIP:%s " \
-		"DstMAC:%02X:%02X:%02X:%02X:%02X:%02X DstIP:%s %s %d\n", \
+		"%s port:%d SrcMAC:" RTE_ETHER_ADDR_PRT_FMT " SrcIP:%s " \
+		"DstMAC:" RTE_ETHER_ADDR_PRT_FMT " DstIP:%s %s %d\n", \
 		info, \
 		port, \
-		eth_h->s_addr.addr_bytes[0], eth_h->s_addr.addr_bytes[1], \
-		eth_h->s_addr.addr_bytes[2], eth_h->s_addr.addr_bytes[3], \
-		eth_h->s_addr.addr_bytes[4], eth_h->s_addr.addr_bytes[5], \
+		RTE_ETHER_ADDR_BYTES(&eth_h->src_addr), \
 		src_ip, \
-		eth_h->d_addr.addr_bytes[0], eth_h->d_addr.addr_bytes[1], \
-		eth_h->d_addr.addr_bytes[2], eth_h->d_addr.addr_bytes[3], \
-		eth_h->d_addr.addr_bytes[4], eth_h->d_addr.addr_bytes[5], \
+		RTE_ETHER_ADDR_BYTES(&eth_h->dst_addr), \
 		dst_ip, \
 		arp_op, ++burstnumber)
 #endif
 
 static void
-mode6_debug(const char __attribute__((unused)) *info,
+mode6_debug(const char __rte_unused *info,
 	struct rte_ether_hdr *eth_h, uint16_t port,
-	uint32_t __attribute__((unused)) *burstnumber)
+	uint32_t __rte_unused *burstnumber)
 {
 	struct rte_ipv4_hdr *ipv4_h;
 #ifdef RTE_LIBRTE_BOND_DEBUG_ALB
@@ -534,8 +530,8 @@ mode6_debug(const char __attribute__((unused)) *info,
 static uint16_t
 bond_ethdev_rx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
-	struct bond_tx_queue *bd_tx_q = (struct bond_tx_queue *)queue;
-	struct bond_dev_private *internals = bd_tx_q->dev_private;
+	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
+	struct bond_dev_private *internals = bd_rx_q->dev_private;
 	struct rte_ether_hdr *eth_h;
 	uint16_t ether_type, offset;
 	uint16_t nb_recv_pkts;
@@ -647,9 +643,9 @@ static inline uint16_t
 ether_hash(struct rte_ether_hdr *eth_hdr)
 {
 	unaligned_uint16_t *word_src_addr =
-		(unaligned_uint16_t *)eth_hdr->s_addr.addr_bytes;
+		(unaligned_uint16_t *)eth_hdr->src_addr.addr_bytes;
 	unaligned_uint16_t *word_dst_addr =
-		(unaligned_uint16_t *)eth_hdr->d_addr.addr_bytes;
+		(unaligned_uint16_t *)eth_hdr->dst_addr.addr_bytes;
 
 	return (word_src_addr[0] ^ word_dst_addr[0]) ^
 			(word_src_addr[1] ^ word_dst_addr[1]) ^
@@ -946,10 +942,10 @@ bond_ethdev_tx_burst_tlb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			ether_hdr = rte_pktmbuf_mtod(bufs[j],
 						struct rte_ether_hdr *);
-			if (rte_is_same_ether_addr(&ether_hdr->s_addr,
+			if (rte_is_same_ether_addr(&ether_hdr->src_addr,
 					&primary_slave_addr))
 				rte_ether_addr_copy(&active_slave_addr,
-						&ether_hdr->s_addr);
+						&ether_hdr->src_addr);
 #if defined(RTE_LIBRTE_BOND_DEBUG_ALB) || defined(RTE_LIBRTE_BOND_DEBUG_ALB_L1)
 					mode6_debug("TX IPv4:", ether_hdr, slaves[i], &burstnumberTX);
 #endif
@@ -1021,7 +1017,7 @@ bond_ethdev_tx_burst_alb(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 			slave_idx = bond_mode_alb_arp_xmit(eth_h, offset, internals);
 
 			/* Change src mac in eth header */
-			rte_eth_macaddr_get(slave_idx, &eth_h->s_addr);
+			rte_eth_macaddr_get(slave_idx, &eth_h->src_addr);
 
 			/* Add packet to slave tx buffer */
 			slave_bufs[slave_idx][slave_bufs_pkts[slave_idx]] = bufs[i];
@@ -1373,8 +1369,8 @@ link_properties_set(struct rte_eth_dev *ethdev, struct rte_eth_link *slave_link)
 		 * In any other mode the link properties are set to default
 		 * values of AUTONEG/DUPLEX
 		 */
-		ethdev->data->dev_link.link_autoneg = ETH_LINK_AUTONEG;
-		ethdev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+		ethdev->data->dev_link.link_autoneg = RTE_ETH_LINK_AUTONEG;
+		ethdev->data->dev_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 	}
 }
@@ -1502,6 +1498,7 @@ int
 mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
 {
 	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
+	bool set;
 	int i;
 
 	/* Update slave devices MAC addresses */
@@ -1529,15 +1526,16 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
 	case BONDING_MODE_TLB:
 	case BONDING_MODE_ALB:
 	default:
+		set = true;
 		for (i = 0; i < internals->slave_count; i++) {
 			if (internals->slaves[i].port_id ==
 					internals->current_primary_port) {
 				if (rte_eth_dev_default_mac_addr_set(
-						internals->primary_port,
+						internals->current_primary_port,
 						bonded_eth_dev->data->mac_addrs)) {
 					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
 							internals->current_primary_port);
-					return -1;
+					set = false;
 				}
 			} else {
 				if (rte_eth_dev_default_mac_addr_set(
@@ -1545,10 +1543,11 @@ mac_address_slaves_update(struct rte_eth_dev *bonded_eth_dev)
 						&internals->slaves[i].persisted_mac_addr)) {
 					RTE_BOND_LOG(ERR, "Failed to update port Id %d MAC address",
 							internals->slaves[i].port_id);
-					return -1;
 				}
 			}
 		}
+		if (!set)
+			return -1;
 	}
 
 	return 0;
@@ -1691,22 +1690,22 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	struct bond_dev_private *internals = bonded_eth_dev->data->dev_private;
 
 	/* Stop slave */
-	rte_eth_dev_stop(slave_eth_dev->data->port_id);
+	errval = rte_eth_dev_stop(slave_eth_dev->data->port_id);
+	if (errval != 0)
+		RTE_BOND_LOG(ERR, "rte_eth_dev_stop: port %u, err (%d)",
+			     slave_eth_dev->data->port_id, errval);
 
 	/* Enable interrupts on slave device if supported */
 	if (slave_eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
 		slave_eth_dev->data->dev_conf.intr_conf.lsc = 1;
 
 	/* If RSS is enabled for bonding, try to enable it for slaves */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		if (internals->rss_key_len != 0) {
-			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) {
+		/* rss_key won't be empty if RSS is configured in bonded dev */
+		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
				internals->rss_key_len;
-			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
+		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
 				internals->rss_key;
-		} else {
-			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
-		}
 
 		slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
 				bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
@@ -1715,12 +1714,15 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
-			DEV_RX_OFFLOAD_VLAN_FILTER)
+			RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
 		slave_eth_dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_VLAN_FILTER;
+				RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
 	else
 		slave_eth_dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_VLAN_FILTER;
+				~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+
+	slave_eth_dev->data->dev_conf.rxmode.mtu =
+	    bonded_eth_dev->data->dev_conf.rxmode.mtu;
 
 	nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
 	nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
@@ -1788,12 +1790,13 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 				!= 0)
 			return errval;
 
-		if (bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
-				slave_eth_dev->data->port_id) != 0) {
+		errval = bond_ethdev_8023ad_flow_verify(bonded_eth_dev,
+				slave_eth_dev->data->port_id);
+		if (errval != 0) {
 			RTE_BOND_LOG(ERR,
-				"rte_eth_tx_queue_setup: port=%d queue_id %d, err (%d)",
-				slave_eth_dev->data->port_id, q_id, errval);
-			return -1;
+				"bond_ethdev_8023ad_flow_verify: port=%d, err (%d)",
+				slave_eth_dev->data->port_id, errval);
+			return errval;
 		}
 
 		if (internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id] != NULL)
@@ -1801,8 +1804,14 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 					internals->mode4.dedicated_queues.flow[slave_eth_dev->data->port_id],
 					&flow_error);
 
-		bond_ethdev_8023ad_flow_set(bonded_eth_dev,
+		errval = bond_ethdev_8023ad_flow_set(bonded_eth_dev,
 				slave_eth_dev->data->port_id);
+		if (errval != 0) {
+			RTE_BOND_LOG(ERR,
+				"bond_ethdev_8023ad_flow_set: port=%d, err (%d)",
+				slave_eth_dev->data->port_id, errval);
+			return errval;
+		}
 	}
 
 	/* Start device */
@@ -1814,7 +1823,7 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 	}
 
 	/* If RSS is enabled for bonding, synchronize RETA */
-	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
+	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
 		int i;
 		struct bond_dev_private *internals;
@@ -1876,7 +1885,7 @@ slave_remove(struct bond_dev_private *internals,
 	internals->slave_count--;
 
 	/* force reconfiguration of slave interfaces */
-	_rte_eth_dev_reset(slave_eth_dev);
+	rte_eth_dev_internal_reset(slave_eth_dev);
 }
 
 static void
@@ -1937,7 +1946,7 @@ bond_ethdev_start(struct rte_eth_dev *eth_dev)
 		return -1;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 1;
 
 	internals = eth_dev->data->dev_private;
@@ -2043,11 +2052,12 @@ bond_ethdev_free_queues(struct rte_eth_dev *dev)
 	}
 }
 
-void
+int
 bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 {
 	struct bond_dev_private *internals = eth_dev->data->dev_private;
 	uint16_t i;
+	int ret;
 
 	if (internals->mode == BONDING_MODE_8023AD) {
 		struct port *port;
@@ -2076,7 +2086,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 			tlb_last_obytets[internals->active_slaves[i]] = 0;
 	}
 
-	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
+	eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
 	internals->link_status_polling_enabled = 0;
@@ -2086,13 +2096,20 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 				internals->active_slave_count, slave_id) !=
 						internals->active_slave_count) {
 			internals->slaves[i].last_link_status = 0;
-			rte_eth_dev_stop(slave_id);
+			ret = rte_eth_dev_stop(slave_id);
+			if (ret != 0) {
+				RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
+					     slave_id);
+				return ret;
+			}
 			deactivate_slave(eth_dev, slave_id);
 		}
 	}
+
+	return 0;
 }
 
-void
+int
 bond_ethdev_close(struct rte_eth_dev *dev)
 {
 	struct bond_dev_private *internals = dev->data->dev_private;
@@ -2100,11 +2117,18 @@ bond_ethdev_close(struct rte_eth_dev *dev)
 	int skipped = 0;
 	struct rte_flow_error ferror;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	RTE_BOND_LOG(INFO, "Closing bonded device %s", dev->device->name);
 	while (internals->slave_count != skipped) {
 		uint16_t port_id = internals->slaves[skipped].port_id;
 
-		rte_eth_dev_stop(port_id);
+		if (rte_eth_dev_stop(port_id) != 0) {
+			RTE_BOND_LOG(ERR, "Failed to stop device on port %u",
+				     port_id);
+			skipped++;
+		}
 
 		if (rte_eth_bond_slave_remove(bond_port_id, port_id) != 0) {
 			RTE_BOND_LOG(ERR,
@@ -2116,6 +2140,18 @@ bond_ethdev_close(struct rte_eth_dev *dev)
 	bond_flow_ops.flush(dev, &ferror);
 	bond_ethdev_free_queues(dev);
 	rte_bitmap_reset(internals->vlan_filter_bmp);
+	rte_bitmap_free(internals->vlan_filter_bmp);
+	rte_free(internals->vlan_filter_bmpmem);
+
+	/* Try to release mempool used in mode6. If the bond
+	 * device is not mode6, free the NULL is not problem.
+	 */
+	rte_mempool_free(internals->mode6.mempool);
+
+	if (internals->kvlist != NULL)
+		rte_kvargs_free(internals->kvlist);
+
+	return 0;
 }
 
 /* forward declaration */
@@ -2204,6 +2240,7 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->flow_type_rss_offloads = internals->flow_type_rss_offloads;
 	dev_info->reta_size = internals->reta_size;
+	dev_info->hash_key_size = internals->rss_key_len;
 
 	return 0;
 }
@@ -2285,8 +2322,10 @@ bond_ethdev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
 }
 
 static void
-bond_ethdev_rx_queue_release(void *queue)
+bond_ethdev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+	void *queue = dev->data->rx_queues[queue_id];
+
 	if (queue == NULL)
 		return;
 
@@ -2294,8 +2333,10 @@
 static void
-bond_ethdev_tx_queue_release(void *queue)
+bond_ethdev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_id)
 {
+	void *queue = dev->data->tx_queues[queue_id];
+
 	if (queue == NULL)
 		return;
 
@@ -2375,15 +2416,15 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 
 	bond_ctx = ethdev->data->dev_private;
 
-	ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+	ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 
 	if (ethdev->data->dev_started == 0 ||
 		bond_ctx->active_slave_count == 0) {
-		ethdev->data->dev_link.link_status = ETH_LINK_DOWN;
+		ethdev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 		return 0;
 	}
 
-	ethdev->data->dev_link.link_status = ETH_LINK_UP;
+	ethdev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
 	if (wait_to_complete)
 		link_update = rte_eth_link_get;
@@ -2408,7 +2449,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 				&slave_link);
 		if (ret < 0) {
 			ethdev->data->dev_link.link_speed =
-				ETH_SPEED_NUM_NONE;
+				RTE_ETH_SPEED_NUM_NONE;
 			RTE_BOND_LOG(ERR,
 				"Slave (port %u) link get failed: %s",
 				bond_ctx->active_slaves[idx],
@@ -2439,7 +2480,8 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 			bond_ctx->mode4.slave_link.link_autoneg;
 		ethdev->data->dev_link.link_duplex =
 			bond_ctx->mode4.slave_link.link_duplex;
-		/* fall through to update link speed */
+		/* fall through */
+		/* to update link speed */
 	case BONDING_MODE_ROUND_ROBIN:
 	case BONDING_MODE_BALANCE:
 	case BONDING_MODE_TLB:
@@ -2449,7 +2491,7 @@ bond_ethdev_link_update(struct rte_eth_dev *ethdev, int wait_to_complete)
 		 * In theses mode the maximum theoretical link speed is the sum
 		 * of all the slaves
 		 */
-		ethdev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
+		ethdev->data->dev_link.link_speed = RTE_ETH_SPEED_NUM_NONE;
 		one_link_update_succeeded = false;
 
 		for (idx = 0; idx < bond_ctx->active_slave_count; idx++) {
@@ -2642,24 +2684,41 @@ bond_ethdev_promiscuous_disable(struct rte_eth_dev *dev)
 	return ret;
 }
 
-static void
+static int
 bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
 	struct bond_dev_private *internals = eth_dev->data->dev_private;
 	int i;
+	int ret = 0;
+	uint16_t port_id;
 
 	switch (internals->mode) {
 	/* allmulti mode is propagated to all slaves */
 	case BONDING_MODE_ROUND_ROBIN:
 	case BONDING_MODE_BALANCE:
 	case BONDING_MODE_BROADCAST:
-	case BONDING_MODE_8023AD:
+	case BONDING_MODE_8023AD: {
+		unsigned int slave_ok = 0;
+
 		for (i = 0; i < internals->slave_count; i++) {
-			uint16_t port_id = internals->slaves[i].port_id;
+			port_id = internals->slaves[i].port_id;
 
-			rte_eth_allmulticast_enable(port_id);
+			ret = rte_eth_allmulticast_enable(port_id);
+			if (ret != 0)
+				RTE_BOND_LOG(ERR,
					"Failed to enable allmulti mode for port %u: %s",
+					port_id, rte_strerror(-ret));
+			else
+				slave_ok++;
 		}
+		/*
+		 * Report success if operation is successful on at least
+		 * on one slave. Otherwise return last error code.
+		 */
+		if (slave_ok > 0)
+			ret = 0;
 		break;
+	}
 	/* allmulti mode is propagated only to primary slave */
 	case BONDING_MODE_ACTIVE_BACKUP:
 	case BONDING_MODE_TLB:
@@ -2668,22 +2727,33 @@ bond_ethdev_allmulticast_enable(struct rte_eth_dev *eth_dev)
 		/* Do not touch allmulti when there cannot be primary ports */
 		if (internals->slave_count == 0)
 			break;
-		rte_eth_allmulticast_enable(internals->current_primary_port);
+		port_id = internals->current_primary_port;
+		ret = rte_eth_allmulticast_enable(port_id);
+		if (ret != 0)
+			RTE_BOND_LOG(ERR,
+				"Failed to enable allmulti mode for port %u: %s",
+				port_id, rte_strerror(-ret));
 	}
+
+	return ret;
 }
 
-static void
+static int
 bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
 {
 	struct bond_dev_private *internals = eth_dev->data->dev_private;
 	int i;
+	int ret = 0;
+	uint16_t port_id;
 
 	switch (internals->mode) {
 	/* allmulti mode is propagated to all slaves */
 	case BONDING_MODE_ROUND_ROBIN:
 	case BONDING_MODE_BALANCE:
 	case BONDING_MODE_BROADCAST:
-	case BONDING_MODE_8023AD:
+	case BONDING_MODE_8023AD: {
+		unsigned int slave_ok = 0;
+
 		for (i = 0; i < internals->slave_count; i++) {
 			uint16_t port_id = internals->slaves[i].port_id;
 
@@ -2691,9 +2761,23 @@ bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
 			    bond_mode_8023ad_ports[port_id].forced_rx_flags ==
 					BOND_8023AD_FORCED_ALLMULTI)
 				continue;
-			rte_eth_allmulticast_disable(port_id);
+
+			ret = rte_eth_allmulticast_disable(port_id);
+			if (ret != 0)
+				RTE_BOND_LOG(ERR,
+					"Failed to disable allmulti mode for port %u: %s",
+					port_id, rte_strerror(-ret));
+			else
+				slave_ok++;
 		}
+		/*
+		 * Report success if operation is successful on at least
+		 * on one slave. Otherwise return last error code.
+		 */
+		if (slave_ok > 0)
+			ret = 0;
 		break;
+	}
 	/* allmulti mode is propagated only to primary slave */
 	case BONDING_MODE_ACTIVE_BACKUP:
 	case BONDING_MODE_TLB:
@@ -2702,8 +2786,15 @@ bond_ethdev_allmulticast_disable(struct rte_eth_dev *eth_dev)
 		/* Do not touch allmulti when there cannot be primary ports */
 		if (internals->slave_count == 0)
 			break;
-		rte_eth_allmulticast_disable(internals->current_primary_port);
+		port_id = internals->current_primary_port;
+		ret = rte_eth_allmulticast_disable(port_id);
+		if (ret != 0)
+			RTE_BOND_LOG(ERR,
+				"Failed to disable allmulti mode for port %u: %s",
+				port_id, rte_strerror(-ret));
 	}
+
+	return ret;
 }
 
 static void
@@ -2712,7 +2803,7 @@ bond_ethdev_delayed_lsc_propagation(void *arg)
 	if (arg == NULL)
 		return;
 
-	_rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
+	rte_eth_dev_callback_process((struct rte_eth_dev *)arg,
 			RTE_ETH_EVENT_INTR_LSC, NULL);
 }
 
@@ -2774,7 +2865,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 		goto link_update;
 
 	/* check link state properties if bonded link is up*/
-	if (bonded_eth_dev->data->dev_link.link_status == ETH_LINK_UP) {
+	if (bonded_eth_dev->data->dev_link.link_status == RTE_ETH_LINK_UP) {
 		if (link_properties_valid(bonded_eth_dev, &link) != 0)
 			RTE_BOND_LOG(ERR, "Invalid link properties "
 				     "for slave %d in bonding mode %d",
@@ -2790,7 +2881,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 		if (internals->active_slave_count < 1) {
 			/* If first active slave, then change link status */
 			bonded_eth_dev->data->dev_link.link_status =
-							ETH_LINK_UP;
+							RTE_ETH_LINK_UP;
 			internals->current_primary_port = port_id;
 			lsc_flag = 1;
@@ -2823,6 +2914,7 @@ bond_ethdev_lsc_event_callback(uint16_t port_id, enum rte_eth_event_type type,
 					internals->active_slaves[0]);
 			else
 				internals->current_primary_port = internals->primary_port;
+			mac_address_slaves_update(bonded_eth_dev);
 		}
 	}
 
@@ -2846,7 +2938,7 @@ link_update:
 					bond_ethdev_delayed_lsc_propagation,
 					(void *)bonded_eth_dev);
 		else
-			_rte_eth_dev_callback_process(bonded_eth_dev,
+			rte_eth_dev_callback_process(bonded_eth_dev,
 					RTE_ETH_EVENT_INTR_LSC,
 					NULL);
 
@@ -2856,7 +2948,7 @@ link_update:
 					bond_ethdev_delayed_lsc_propagation,
 					(void *)bonded_eth_dev);
 		else
-			_rte_eth_dev_callback_process(bonded_eth_dev,
+			rte_eth_dev_callback_process(bonded_eth_dev,
 					RTE_ETH_EVENT_INTR_LSC,
 					NULL);
 	}
@@ -2881,11 +2973,12 @@ bond_ethdev_rss_reta_update(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	reta_count = reta_size / RTE_RETA_GROUP_SIZE;
+	reta_count = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) /
+			RTE_ETH_RETA_GROUP_SIZE;
 
 	for (i = 0; i < reta_count; i++) {
 		internals->reta_conf[i].mask = reta_conf[i].mask;
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				internals->reta_conf[i].reta[j] = reta_conf[i].reta[j];
 	}
@@ -2918,8 +3011,8 @@ bond_ethdev_rss_reta_query(struct rte_eth_dev *dev,
 		return -EINVAL;
 
 	 /* Copy RETA table */
-	for (i = 0; i < reta_size / RTE_RETA_GROUP_SIZE; i++)
-		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+	for (i = 0; i < reta_size / RTE_ETH_RETA_GROUP_SIZE; i++)
+		for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 			if ((reta_conf[i].mask >> j) & 0x01)
 				reta_conf[i].reta[j] = internals->reta_conf[i].reta[j];
 
@@ -2941,13 +3034,15 @@ bond_ethdev_rss_hash_update(struct rte_eth_dev *dev,
 	if (bond_rss_conf.rss_hf != 0)
 		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf = bond_rss_conf.rss_hf;
 
-	if (bond_rss_conf.rss_key && bond_rss_conf.rss_key_len <
-			sizeof(internals->rss_key)) {
-		if (bond_rss_conf.rss_key_len == 0)
-			bond_rss_conf.rss_key_len = 40;
-		internals->rss_key_len = bond_rss_conf.rss_key_len;
+	if (bond_rss_conf.rss_key) {
+		if (bond_rss_conf.rss_key_len < internals->rss_key_len)
+			return -EINVAL;
+		else if (bond_rss_conf.rss_key_len > internals->rss_key_len)
+			RTE_BOND_LOG(WARNING, "rss_key will be truncated");
+
 		memcpy(internals->rss_key, bond_rss_conf.rss_key,
 				internals->rss_key_len);
+		bond_rss_conf.rss_key_len = internals->rss_key_len;
 	}
 
 	for (i = 0; i < internals->slave_count; i++) {
@@ -3015,14 +3110,11 @@ bond_ethdev_mac_address_set(struct rte_eth_dev *dev,
 }
 
 static int
-bond_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
-		 enum rte_filter_type type, enum rte_filter_op op, void *arg)
+bond_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
+		  const struct rte_flow_ops **ops)
 {
-	if (type == RTE_ETH_FILTER_GENERIC && op == RTE_ETH_FILTER_GET) {
-		*(const void **)arg = &bond_flow_ops;
-		return 0;
-	}
-	return -ENOTSUP;
+	*ops = &bond_flow_ops;
+	return 0;
 }
 
 static int
@@ -3114,7 +3206,7 @@ const struct eth_dev_ops default_dev_ops = {
 	.mac_addr_set         = bond_ethdev_mac_address_set,
 	.mac_addr_add         = bond_ethdev_mac_addr_add,
 	.mac_addr_remove      = bond_ethdev_mac_addr_remove,
-	.filter_ctrl          = bond_filter_ctrl
+	.flow_ops_get         = bond_flow_ops_get
 };
 
 static int
@@ -3152,7 +3244,8 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	}
 
 	eth_dev->dev_ops = &default_dev_ops;
-	eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC;
+	eth_dev->data->dev_flags = RTE_ETH_DEV_INTR_LSC |
+					RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
 	rte_spinlock_init(&internals->lock);
 	rte_spinlock_init(&internals->lsc_lock);
@@ -3181,7 +3274,7 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	internals->max_rx_pktlen = 0;
 
 	/* Initially allow to choose any offload type */
-	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
+	internals->flow_type_rss_offloads = RTE_ETH_RSS_PROTO_MASK;
 
 	memset(&internals->default_rxconf, 0,
 	       sizeof(internals->default_rxconf));
@@ -3242,8 +3335,9 @@ bond_probe(struct rte_vdev_device *dev)
 	const char *name;
 	struct bond_dev_private *internals;
 	struct rte_kvargs *kvlist;
-	uint8_t bonding_mode, socket_id/*, agg_mode*/;
-	int  arg_count, port_id;
+	uint8_t bonding_mode;
+	int arg_count, port_id;
+	int socket_id;
 	uint8_t agg_mode;
 	struct rte_eth_dev *eth_dev;
@@ -3350,6 +3444,7 @@ bond_remove(struct rte_vdev_device *dev)
 	struct rte_eth_dev *eth_dev;
 	struct bond_dev_private *internals;
 	const char *name;
+	int ret = 0;
 
 	if (!dev)
 		return -EINVAL;
@@ -3357,14 +3452,10 @@ bond_remove(struct rte_vdev_device *dev)
 	name = rte_vdev_device_name(dev);
 	RTE_BOND_LOG(INFO, "Uninitializing pmd_bond for %s", name);
 
-	/* now free all data allocation - for eth_dev structure,
-	 * dummy pci driver and internal (private) data
-	 */
-
 	/* find an ethdev entry */
 	eth_dev = rte_eth_dev_allocated(name);
 	if (eth_dev == NULL)
-		return -ENODEV;
+		return 0; /* port already released */
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return rte_eth_dev_release_port(eth_dev);
@@ -3376,25 +3467,12 @@ bond_remove(struct rte_vdev_device *dev)
 		return -EBUSY;
 
 	if (eth_dev->data->dev_started == 1) {
-		bond_ethdev_stop(eth_dev);
+		ret = bond_ethdev_stop(eth_dev);
 		bond_ethdev_close(eth_dev);
 	}
-
-	eth_dev->dev_ops = NULL;
-	eth_dev->rx_pkt_burst = NULL;
-	eth_dev->tx_pkt_burst = NULL;
-
-	internals = eth_dev->data->dev_private;
-	/* Try to release mempool used in mode6. If the bond
-	 * device is not mode6, free the NULL is not problem.
-	 */
-	rte_mempool_free(internals->mode6.mempool);
-	rte_bitmap_free(internals->vlan_filter_bmp);
-	rte_free(internals->vlan_filter_bmpmem);
-	rte_eth_dev_release_port(eth_dev);
-
-	return 0;
+	return ret;
 }
 
 /* this part will resolve the slave portids after all the other pdev and vdev
@@ -3423,24 +3501,34 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 	 * set key to the the value specified in port RSS configuration.
 	 * Fall back to default RSS key if the key is not specified
 	 */
-	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
-		if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
-			internals->rss_key_len =
-				dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
-			memcpy(internals->rss_key,
-			       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
+	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS) {
+		struct rte_eth_rss_conf *rss_conf =
+			&dev->data->dev_conf.rx_adv_conf.rss_conf;
+		if (rss_conf->rss_key != NULL) {
+			if (internals->rss_key_len > rss_conf->rss_key_len) {
+				RTE_BOND_LOG(ERR, "Invalid rss key length(%u)",
+						rss_conf->rss_key_len);
+				return -EINVAL;
+			}
+
+			memcpy(internals->rss_key, rss_conf->rss_key,
 			       internals->rss_key_len);
 		} else {
-			internals->rss_key_len = sizeof(default_rss_key);
+			if (internals->rss_key_len > sizeof(default_rss_key)) {
+				RTE_BOND_LOG(ERR,
+				       "There is no suitable default hash key");
+				return -EINVAL;
+			}
+
 			memcpy(internals->rss_key, default_rss_key,
 			       internals->rss_key_len);
 		}
 
 		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
 			internals->reta_conf[i].mask = ~0LL;
-			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
+			for (j = 0; j < RTE_ETH_RETA_GROUP_SIZE; j++)
 				internals->reta_conf[i].reta[j] =
-					(i * RTE_RETA_GROUP_SIZE + j) %
+					(i * RTE_ETH_RETA_GROUP_SIZE + j) %
 						dev->data->nb_rx_queues;
 		}
 	}
@@ -3700,11 +3788,7 @@ RTE_PMD_REGISTER_PARAM_STRING(net_bonding,
 	"up_delay= "
 	"down_delay=");
 
-int bond_logtype;
-
-RTE_INIT(bond_init_log)
-{
-	bond_logtype = rte_log_register("pmd.net.bond");
-	if (bond_logtype >= 0)
-		rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);
-}
+/* We can't use RTE_LOG_REGISTER_DEFAULT because of the forced name for
+ * this library, see meson.build.
+ */
+RTE_LOG_REGISTER(bond_logtype, pmd.net.bonding, NOTICE);