X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=inline;f=drivers%2Fnet%2Fbonding%2Frte_eth_bond_pmd.c;h=44deaf119398a4e1979d7c0d49bd8074b2a7f561;hb=0bf1e98f104384c124c8bd0a94a72c06d608f213;hp=b84f3226305b47b764ce543442d62952f4cdf90e;hpb=6b2a47de074a0a6abc9a7c8d1aaa2a66898b7b48;p=dpdk.git

diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c
index b84f322630..44deaf1193 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -58,28 +58,34 @@ bond_ethdev_rx_burst(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	struct bond_dev_private *internals;
 
-	uint16_t num_rx_slave = 0;
 	uint16_t num_rx_total = 0;
-
+	uint16_t slave_count;
+	uint16_t active_slave;
 	int i;
 
 	/* Cast to structure, containing bonded device's port id and queue id */
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
-
 	internals = bd_rx_q->dev_private;
+	slave_count = internals->active_slave_count;
+	active_slave = internals->active_slave;
 
+	for (i = 0; i < slave_count && nb_pkts; i++) {
+		uint16_t num_rx_slave;
 
-	for (i = 0; i < internals->active_slave_count && nb_pkts; i++) {
 		/* Offset of pointer to *bufs increases as packets are received
 		 * from other slaves */
-		num_rx_slave = rte_eth_rx_burst(internals->active_slaves[i],
-				bd_rx_q->queue_id, bufs + num_rx_total, nb_pkts);
-		if (num_rx_slave) {
-			num_rx_total += num_rx_slave;
-			nb_pkts -= num_rx_slave;
-		}
+		num_rx_slave =
+			rte_eth_rx_burst(internals->active_slaves[active_slave],
+					 bd_rx_q->queue_id,
+					 bufs + num_rx_total, nb_pkts);
+		num_rx_total += num_rx_slave;
+		nb_pkts -= num_rx_slave;
+		if (++active_slave == slave_count)
+			active_slave = 0;
 	}
 
+	if (++internals->active_slave == slave_count)
+		internals->active_slave = 0;
 	return num_rx_total;
 }
 
@@ -258,25 +264,32 @@ bond_ethdev_rx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
 	uint16_t num_rx_total = 0;	/* Total number of received packets */
 	uint16_t slaves[RTE_MAX_ETHPORTS];
 	uint16_t slave_count;
-
-	uint16_t i, idx;
+	uint16_t active_slave;
+	uint16_t i;
 
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
 	slave_count = internals->active_slave_count;
+	active_slave = internals->active_slave;
 	memcpy(slaves, internals->active_slaves,
 			sizeof(internals->active_slaves[0]) * slave_count);
 
-	for (i = 0, idx = internals->active_slave;
-			i < slave_count && num_rx_total < nb_pkts; i++, idx++) {
-		idx = idx % slave_count;
+	for (i = 0; i < slave_count && nb_pkts; i++) {
+		uint16_t num_rx_slave;
 
 		/* Read packets from this slave */
-		num_rx_total += rte_eth_rx_burst(slaves[idx], bd_rx_q->queue_id,
-				&bufs[num_rx_total], nb_pkts - num_rx_total);
+		num_rx_slave = rte_eth_rx_burst(slaves[active_slave],
+						bd_rx_q->queue_id,
+						bufs + num_rx_total, nb_pkts);
+		num_rx_total += num_rx_slave;
+		nb_pkts -= num_rx_slave;
+
+		if (++active_slave == slave_count)
+			active_slave = 0;
 	}
 
-	internals->active_slave = idx;
+	if (++internals->active_slave == slave_count)
+		internals->active_slave = 0;
 	return num_rx_total;
 }
 
@@ -321,7 +334,7 @@ bond_ethdev_tx_burst_8023ad_fast_queue(void *queue, struct rte_mbuf **bufs,
 
 	dist_slave_count = 0;
 	for (i = 0; i < slave_count; i++) {
-		struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
 
 		if (ACTOR_STATE(port, DISTRIBUTING))
 			dist_slave_port_ids[dist_slave_count++] =
@@ -379,8 +392,9 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	/* Cast to structure, containing bonded device's port id and queue id */
 	struct bond_rx_queue *bd_rx_q = (struct bond_rx_queue *)queue;
 	struct bond_dev_private *internals = bd_rx_q->dev_private;
-	struct ether_addr bond_mac;
-
+	struct rte_eth_dev *bonded_eth_dev =
+				&rte_eth_devices[internals->port_id];
+	struct ether_addr *bond_mac = bonded_eth_dev->data->mac_addrs;
 	struct ether_hdr *hdr;
 
 	const uint16_t ether_type_slow_be = rte_be_to_cpu_16(ETHER_TYPE_SLOW);
@@ -393,7 +407,6 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	uint8_t i, j, k;
 	uint8_t subtype;
 
-	rte_eth_macaddr_get(internals->port_id, &bond_mac);
 	/* Copy slave list to protect against slave up/down changes during tx
 	 * bursting */
 	slave_count = internals->active_slave_count;
@@ -407,7 +420,7 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 	}
 	for (i = 0; i < slave_count && num_rx_total < nb_pkts; i++) {
 		j = num_rx_total;
-		collecting = ACTOR_STATE(&mode_8023ad_ports[slaves[idx]],
+		collecting = ACTOR_STATE(&bond_mode_8023ad_ports[slaves[idx]],
 				COLLECTING);
 
 		/* Read packets from this slave */
@@ -436,9 +449,11 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 			 * in collecting state or bonding interface is not in promiscuous
 			 * mode and packet address does not match. */
 			if (unlikely(is_lacp_packets(hdr->ether_type, subtype, bufs[j]) ||
-				!collecting || (!promisc &&
-					!is_multicast_ether_addr(&hdr->d_addr) &&
-					!is_same_ether_addr(&bond_mac, &hdr->d_addr)))) {
+				!collecting ||
+				(!promisc &&
+				 !is_multicast_ether_addr(&hdr->d_addr) &&
+				 !is_same_ether_addr(bond_mac,
+						     &hdr->d_addr)))) {
 
 				if (hdr->ether_type == ether_type_slow_be) {
 					bond_mode_8023ad_handle_slow_pkt(
@@ -459,7 +474,9 @@ bond_ethdev_rx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 			idx = 0;
 	}
 
-	internals->active_slave = idx;
+	if (++internals->active_slave == slave_count)
+		internals->active_slave = 0;
+
 	return num_rx_total;
 }
 
@@ -1295,7 +1312,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 
 	dist_slave_count = 0;
 	for (i = 0; i < slave_count; i++) {
-		struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
 
 		if (ACTOR_STATE(port, DISTRIBUTING))
 			dist_slave_port_ids[dist_slave_count++] =
@@ -1350,7 +1367,7 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs,
 
 	/* Check for LACP control packets and send if available */
 	for (i = 0; i < slave_count; i++) {
-		struct port *port = &mode_8023ad_ports[slave_port_ids[i]];
+		struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
 		struct rte_mbuf *ctrl_pkt = NULL;
 
 		if (likely(rte_ring_empty(port->tx_ring)))
@@ -1701,7 +1718,7 @@ slave_configure_slow_queue(struct rte_eth_dev *bonded_eth_dev,
 	int errval = 0;
 	struct bond_dev_private *internals = (struct bond_dev_private *)
 		bonded_eth_dev->data->dev_private;
-	struct port *port = &mode_8023ad_ports[slave_eth_dev->data->port_id];
+	struct port *port = &bond_mode_8023ad_ports[slave_eth_dev->data->port_id];
 
 	if (port->slow_pool == NULL) {
 		char mem_name[256];
@@ -1778,12 +1795,11 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
 
 	/* If RSS is enabled for bonding, try to enable it for slaves */
 	if (bonded_eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
-		if (bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len
-				!= 0) {
+		if (internals->rss_key_len != 0) {
 			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len =
-					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+					internals->rss_key_len;
 			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key =
-					bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
+					internals->rss_key;
 		} else {
 			slave_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
 		}
@@ -2141,7 +2157,7 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 
 		/* Discard all messages to/from mode 4 state machines */
 		for (i = 0; i < internals->active_slave_count; i++) {
-			port = &mode_8023ad_ports[internals->active_slaves[i]];
+			port = &bond_mode_8023ad_ports[internals->active_slaves[i]];
 
 			RTE_ASSERT(port->rx_ring != NULL);
 			while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT)
@@ -2160,12 +2176,20 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev)
 			tlb_last_obytets[internals->active_slaves[i]] = 0;
 	}
 
-	internals->link_status_polling_enabled = 0;
-	for (i = 0; i < internals->slave_count; i++)
-		internals->slaves[i].last_link_status = 0;
-
 	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
 	eth_dev->data->dev_started = 0;
+
+	internals->link_status_polling_enabled = 0;
+	for (i = 0; i < internals->slave_count; i++) {
+		uint16_t slave_id = internals->slaves[i].port_id;
+		if (find_slave_by_id(internals->active_slaves,
+				internals->active_slave_count, slave_id) !=
+						internals->active_slave_count) {
+			internals->slaves[i].last_link_status = 0;
+			rte_eth_dev_stop(slave_id);
+			deactivate_slave(eth_dev, slave_id);
+		}
+	}
 }
 
 void
@@ -2234,6 +2258,16 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = max_nb_rx_queues;
 	dev_info->max_tx_queues = max_nb_tx_queues;
 
+	memcpy(&dev_info->default_rxconf, &internals->default_rxconf,
+	       sizeof(dev_info->default_rxconf));
+	memcpy(&dev_info->default_txconf, &internals->default_txconf,
+	       sizeof(dev_info->default_txconf));
+
+	memcpy(&dev_info->rx_desc_lim, &internals->rx_desc_lim,
+	       sizeof(dev_info->rx_desc_lim));
+	memcpy(&dev_info->tx_desc_lim, &internals->tx_desc_lim,
+	       sizeof(dev_info->tx_desc_lim));
+
 	/**
 	 * If dedicated hw queues enabled for link bonding device in LACP mode
 	 * then we need to reduce the maximum number of data path queues by 1.
@@ -3054,6 +3088,14 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 	/* Initially allow to choose any offload type */
 	internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK;
 
+	memset(&internals->default_rxconf, 0,
+	       sizeof(internals->default_rxconf));
+	memset(&internals->default_txconf, 0,
+	       sizeof(internals->default_txconf));
+
+	memset(&internals->rx_desc_lim, 0, sizeof(internals->rx_desc_lim));
+	memset(&internals->tx_desc_lim, 0, sizeof(internals->tx_desc_lim));
+
 	memset(internals->active_slaves, 0, sizeof(internals->active_slaves));
 	memset(internals->slaves, 0, sizeof(internals->slaves));
 
@@ -3093,10 +3135,9 @@ bond_alloc(struct rte_vdev_device *dev, uint8_t mode)
 
 err:
 	rte_free(internals);
-	if (eth_dev != NULL) {
-		rte_free(eth_dev->data->mac_addrs);
-		rte_eth_dev_release_port(eth_dev);
-	}
+	if (eth_dev != NULL)
+		eth_dev->data->dev_private = NULL;
+	rte_eth_dev_release_port(eth_dev);
 	return -1;
 }
 
@@ -3117,8 +3158,7 @@ bond_probe(struct rte_vdev_device *dev)
 	name = rte_vdev_device_name(dev);
 	RTE_BOND_LOG(INFO, "Initializing pmd_bond for %s", name);
 
-	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
-	    strlen(rte_vdev_device_args(dev)) == 0) {
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
 		eth_dev = rte_eth_dev_attach_secondary(name);
 		if (!eth_dev) {
 			RTE_BOND_LOG(ERR, "Failed to probe %s", name);
@@ -3181,8 +3221,6 @@ bond_probe(struct rte_vdev_device *dev)
 	internals = rte_eth_devices[port_id].data->dev_private;
 	internals->kvlist = kvlist;
 
-	rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
-
 	if (rte_kvargs_count(kvlist, PMD_BOND_AGG_MODE_KVARG) == 1) {
 		if (rte_kvargs_process(kvlist,
 				PMD_BOND_AGG_MODE_KVARG,
@@ -3195,12 +3233,12 @@ bond_probe(struct rte_vdev_device *dev)
 		}
 
 		if (internals->mode == BONDING_MODE_8023AD)
-			rte_eth_bond_8023ad_agg_selection_set(port_id,
-					agg_mode);
+			internals->mode4.agg_selection = agg_mode;
 	} else {
-		rte_eth_bond_8023ad_agg_selection_set(port_id, AGG_STABLE);
+		internals->mode4.agg_selection = AGG_STABLE;
 	}
 
+	rte_eth_dev_probing_finish(&rte_eth_devices[port_id]);
 	RTE_BOND_LOG(INFO, "Create bonded device %s on port %d in mode %u on "
 			"socket %u.", name, port_id, bonding_mode, socket_id);
 	return 0;
@@ -3233,6 +3271,9 @@ bond_remove(struct rte_vdev_device *dev)
 	if (eth_dev == NULL)
 		return -ENODEV;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return rte_eth_dev_release_port(eth_dev);
+
 	RTE_ASSERT(eth_dev->device == &dev->device);
 
 	internals = eth_dev->data->dev_private;
@@ -3255,8 +3296,6 @@ bond_remove(struct rte_vdev_device *dev)
 	rte_mempool_free(internals->mode6.mempool);
 	rte_bitmap_free(internals->vlan_filter_bmp);
 	rte_free(internals->vlan_filter_bmpmem);
-	rte_free(eth_dev->data->dev_private);
-	rte_free(eth_dev->data->mac_addrs);
 
 	rte_eth_dev_release_port(eth_dev);
 
@@ -3284,16 +3323,30 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 
 	unsigned i, j;
 
-	/* If RSS is enabled, fill table and key with default values */
+	/*
+	 * If RSS is enabled, fill table with default values and
+	 * set key to the value specified in port RSS configuration.
+	 * Fall back to default RSS key if the key is not specified
+	 */
 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
-		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key = internals->rss_key;
-		dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len = 0;
-		memcpy(internals->rss_key, default_rss_key, 40);
+		if (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key != NULL) {
+			internals->rss_key_len =
+				dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
+			memcpy(internals->rss_key,
+			       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key,
+			       internals->rss_key_len);
+		} else {
+			internals->rss_key_len = sizeof(default_rss_key);
+			memcpy(internals->rss_key, default_rss_key,
+			       internals->rss_key_len);
+		}
 
 		for (i = 0; i < RTE_DIM(internals->reta_conf); i++) {
 			internals->reta_conf[i].mask = ~0LL;
 			for (j = 0; j < RTE_RETA_GROUP_SIZE; j++)
-				internals->reta_conf[i].reta[j] = j % dev->data->nb_rx_queues;
+				internals->reta_conf[i].reta[j] =
+					(i * RTE_RETA_GROUP_SIZE + j) %
+					dev->data->nb_rx_queues;
 		}
 	}
 
@@ -3370,9 +3423,16 @@ bond_ethdev_configure(struct rte_eth_dev *dev)
 				"Failed to parse agg selection mode for bonded device %s",
 				name);
 		}
-		if (internals->mode == BONDING_MODE_8023AD)
-			rte_eth_bond_8023ad_agg_selection_set(port_id,
-					agg_mode);
+		if (internals->mode == BONDING_MODE_8023AD) {
+			int ret = rte_eth_bond_8023ad_agg_selection_set(port_id,
+					agg_mode);
+			if (ret < 0) {
+				RTE_BOND_LOG(ERR,
+					"Invalid args for agg selection set for bonded device %s",
+					name);
+				return -1;
+			}
+		}
 	}
 
 	/* Parse/add slave ports to bonded device */
@@ -3549,7 +3609,7 @@ int bond_logtype;
 
 RTE_INIT(bond_init_log)
 {
-	bond_logtype = rte_log_register("pmd.net.bon");
+	bond_logtype = rte_log_register("pmd.net.bond");
 	if (bond_logtype >= 0)
 		rte_log_set_level(bond_logtype, RTE_LOG_NOTICE);
 }
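
Note: the recurring change in the RX burst hunks above replaces the old post-loop bookkeeping (where the incremented idx was stored and could end up equal to slave_count) with an index that is advanced and wrapped inside the loop, so the value written back to internals->active_slave always stays in [0, slave_count). Below is a minimal standalone sketch of that pattern, not part of the patch: poll_slave() is a hypothetical stand-in for rte_eth_rx_burst() on one slave queue, and all identifiers are illustrative.

    #include <stdint.h>

    /* Hypothetical stand-in for rte_eth_rx_burst() on one slave's queue. */
    extern uint16_t poll_slave(uint16_t slave_id, void **bufs, uint16_t nb_pkts);

    /* Receive round-robin across slave_count slaves, resuming at *active_slave_p
     * and writing the next starting index back.  Wrapping at the increment keeps
     * the stored index strictly below slave_count. */
    static uint16_t
    rr_rx_burst_sketch(uint16_t *active_slave_p, uint16_t slave_count,
            void **bufs, uint16_t nb_pkts)
    {
        uint16_t active_slave = *active_slave_p;
        uint16_t num_rx_total = 0;
        uint16_t i;

        for (i = 0; i < slave_count && nb_pkts; i++) {
            /* Advance the output pointer by what has been received so far. */
            uint16_t n = poll_slave(active_slave, bufs + num_rx_total, nb_pkts);

            num_rx_total += n;
            nb_pkts -= n;

            /* Wrap here instead of storing a modulo'd value after the loop. */
            if (++active_slave == slave_count)
                active_slave = 0;
        }

        *active_slave_p = active_slave;
        return num_rx_total;
    }

In the patch itself the plain and fast-queue RX paths differ only in that the 8023ad fast-queue variant first copies the active slave list into a local array before iterating.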
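Note: the bond_ethdev_configure() hunk selects the RSS key once at configure time, copying the caller-supplied key into internals->rss_key when one is given and falling back to default_rss_key otherwise, and slave_configure() now propagates internals->rss_key and rss_key_len to each slave instead of re-reading the bonded port's own dev_conf. The RETA fill likewise switches to the global entry index, (i * RTE_RETA_GROUP_SIZE + j) % nb_rx_queues, so the queue assignment continues across 64-entry groups instead of restarting at queue 0 in every group. A minimal sketch of the key-selection step follows; the names, the buffer capacity, and the bounds check exist only to keep this standalone example safe and are not the PMD's.

    #include <stdint.h>
    #include <string.h>

    #define BOND_RSS_KEY_MAX 52   /* assumed capacity of the stored key buffer */

    /* Illustrative default key; stands in for the PMD's default_rss_key[]. */
    static const uint8_t default_key[40];

    /* Copy the user key when one was supplied and fits, otherwise fall back to
     * the default key; return the resulting length so it can later be handed
     * to every slave's rss_conf. */
    static uint8_t
    select_rss_key_sketch(const uint8_t *user_key, uint8_t user_key_len,
            uint8_t stored_key[BOND_RSS_KEY_MAX])
    {
        if (user_key != NULL && user_key_len != 0 &&
                user_key_len <= BOND_RSS_KEY_MAX) {
            memcpy(stored_key, user_key, user_key_len);
            return user_key_len;
        }

        memcpy(stored_key, default_key, sizeof(default_key));
        return (uint8_t)sizeof(default_key);
    }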