diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 030e0469c3..98d0362fa3 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -38,8 +38,6 @@
 #include "cxgbe_pfvf.h"
 #include "cxgbe_flow.h"
 
-int cxgbe_logtype;
-
 /*
  * Macros needed to support the PCI Device ID Table ...
  */
@@ -67,20 +65,25 @@ uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
 	uint16_t pkts_sent, pkts_remain;
 	uint16_t total_sent = 0;
+	uint16_t idx = 0;
 	int ret = 0;
 
-	CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
-		       __func__, txq, tx_pkts, nb_pkts);
-
 	t4_os_lock(&txq->txq_lock);
 	/* free up desc from already completed tx */
 	reclaim_completed_tx(&txq->q);
+	if (unlikely(!nb_pkts))
+		goto out_unlock;
+
+	rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
 	while (total_sent < nb_pkts) {
 		pkts_remain = nb_pkts - total_sent;
 
 		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
-			ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
-					  nb_pkts);
+			idx = total_sent + pkts_sent;
+			if ((idx + 1) < nb_pkts)
+				rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
+							       volatile void *));
+			ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
 			if (ret < 0)
 				break;
 		}
@@ -91,6 +94,7 @@ uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		reclaim_completed_tx(&txq->q);
 	}
 
+out_unlock:
 	t4_os_unlock(&txq->txq_lock);
 	return total_sent;
 }
@@ -101,13 +105,9 @@ uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
 	unsigned int work_done;
 
-	CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
-		       __func__, rxq->rspq.cntxt_id, nb_pkts);
-
 	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts,
		       &work_done))
 		dev_err(adapter, "error in cxgbe poll\n");
 
-	CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
 	return work_done;
 }
 
@@ -116,7 +116,6 @@ int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
-	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;
 
 	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
 		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
@@ -126,8 +125,8 @@ int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
 	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
-	device_info->max_rx_queues = max_queues;
-	device_info->max_tx_queues = max_queues;
+	device_info->max_rx_queues = adapter->sge.max_ethqsets;
+	device_info->max_tx_queues = adapter->sge.max_ethqsets;
 	device_info->max_mac_addrs = 1;
 	/* XXX: For now we support one MAC/port */
 	device_info->max_vfs = adapter->params.arch.vfcount;
@@ -168,26 +167,26 @@ int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
 			     0, -1, 1, -1, false);
 }
 
-void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
 
 	/* TODO: address filters ?? 
*/ - t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, - -1, 1, 1, -1, false); + return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, + -1, 1, 1, -1, false); } -void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) +int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev) { struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; /* TODO: address filters ?? */ - t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, - -1, 0, 1, -1, false); + return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1, + -1, 0, 1, -1, false); } int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, @@ -201,6 +200,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev, u8 old_link = pi->link_cfg.link_ok; for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) { + if (!s->fw_evtq.desc) + break; + cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); /* Exit if link status changed or always forced up */ @@ -234,6 +236,9 @@ int cxgbe_dev_set_link_up(struct rte_eth_dev *dev) struct sge *s = &adapter->sge; int ret; + if (!s->fw_evtq.desc) + return -ENOMEM; + /* Flush all link events */ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); @@ -260,6 +265,9 @@ int cxgbe_dev_set_link_down(struct rte_eth_dev *dev) struct sge *s = &adapter->sge; int ret; + if (!s->fw_evtq.desc) + return -ENOMEM; + /* Flush all link events */ cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done); @@ -310,23 +318,41 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) /* * Stop device. */ -void cxgbe_dev_close(struct rte_eth_dev *eth_dev) +int cxgbe_dev_close(struct rte_eth_dev *eth_dev) { - struct port_info *pi = eth_dev->data->dev_private; + struct port_info *temp_pi, *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; + u8 i; CXGBE_FUNC_TRACE(); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + if (!(adapter->flags & FULL_INIT_DONE)) - return; + return 0; + + if (!pi->viid) + return 0; cxgbe_down(pi); + t4_sge_eth_release_queues(pi); + t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid); + pi->viid = 0; - /* - * We clear queues only if both tx and rx path of the port - * have been disabled + /* Free up the adapter-wide resources only after all the ports + * under this PF have been closed. */ - t4_sge_eth_clear_queues(pi); + for_each_port(adapter, i) { + temp_pi = adap2pinfo(adapter, i); + if (temp_pi->viid) + return 0; + } + + cxgbe_close(adapter); + rte_free(adapter); + + return 0; } /* Start the device. @@ -390,7 +416,7 @@ out: /* * Stop device: disable rx and tx functions to allow for reconfiguring. 
*/ -void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) +int cxgbe_dev_stop(struct rte_eth_dev *eth_dev) { struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; @@ -398,7 +424,7 @@ void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) CXGBE_FUNC_TRACE(); if (!(adapter->flags & FULL_INIT_DONE)) - return; + return 0; cxgbe_down(pi); @@ -408,6 +434,8 @@ void cxgbe_dev_stop(struct rte_eth_dev *eth_dev) */ t4_sge_eth_clear_queues(pi); eth_dev->data->scattered_rx = 0; + + return 0; } int cxgbe_dev_configure(struct rte_eth_dev *eth_dev) @@ -418,6 +446,10 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev) CXGBE_FUNC_TRACE(); + if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + eth_dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_RSS_HASH; + if (!(adapter->flags & FW_QUEUE_BOUND)) { err = cxgbe_setup_sge_fwevtq(adapter); if (err) @@ -475,13 +507,14 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; struct sge *s = &adapter->sge; - struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx]; - int err = 0; unsigned int temp_nb_desc; + struct sge_eth_txq *txq; + int err = 0; + txq = &s->ethtxq[pi->first_txqset + queue_idx]; dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n", __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc, - socket_id, pi->first_qset); + socket_id, pi->first_txqset); /* Free up the existing queue */ if (eth_dev->data->tx_queues[queue_idx]) { @@ -536,17 +569,16 @@ void cxgbe_dev_tx_queue_release(void *q) int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) { - int ret; struct port_info *pi = eth_dev->data->dev_private; struct adapter *adap = pi->adapter; - struct sge_rspq *q; + struct sge_eth_rxq *rxq; + int ret; dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n", __func__, pi->port_id, rx_queue_id); - q = eth_dev->data->rx_queues[rx_queue_id]; - - ret = t4_sge_eth_rxq_start(adap, q); + rxq = eth_dev->data->rx_queues[rx_queue_id]; + ret = t4_sge_eth_rxq_start(adap, rxq); if (ret == 0) eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; @@ -555,16 +587,16 @@ int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) { - int ret; struct port_info *pi = eth_dev->data->dev_private; struct adapter *adap = pi->adapter; - struct sge_rspq *q; + struct sge_eth_rxq *rxq; + int ret; dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n", __func__, pi->port_id, rx_queue_id); - q = eth_dev->data->rx_queues[rx_queue_id]; - ret = t4_sge_eth_rxq_stop(adap, q); + rxq = eth_dev->data->rx_queues[rx_queue_id]; + ret = t4_sge_eth_rxq_stop(adap, rxq); if (ret == 0) eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; @@ -577,16 +609,16 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, const struct rte_eth_rxconf *rx_conf __rte_unused, struct rte_mempool *mp) { + unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; struct port_info *pi = eth_dev->data->dev_private; struct adapter *adapter = pi->adapter; + struct rte_eth_dev_info dev_info; struct sge *s = &adapter->sge; - struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx]; - int err = 0; - int msi_idx = 0; unsigned int temp_nb_desc; - struct rte_eth_dev_info dev_info; - unsigned int pkt_len 
= eth_dev->data->dev_conf.rxmode.max_rx_pkt_len; + int err = 0, msi_idx = 0; + struct sge_eth_rxq *rxq; + rxq = &s->ethrxq[pi->first_rxqset + queue_idx]; dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n", __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc, socket_id, mp); @@ -659,11 +691,10 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, void cxgbe_dev_rx_queue_release(void *q) { struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q; - struct sge_rspq *rq = &rxq->rspq; - if (rq) { + if (rxq) { struct port_info *pi = (struct port_info *) - (rq->eth_dev->data->dev_private); + (rxq->rspq.eth_dev->data->dev_private); struct adapter *adap = pi->adapter; dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n", @@ -703,7 +734,7 @@ static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev, for (i = 0; i < pi->n_rx_qsets; i++) { struct sge_eth_rxq *rxq = - &s->ethrxq[pi->first_qset + i]; + &s->ethrxq[pi->first_rxqset + i]; eth_stats->q_ipackets[i] = rxq->stats.pkts; eth_stats->q_ibytes[i] = rxq->stats.rx_bytes; @@ -713,7 +744,7 @@ static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev, for (i = 0; i < pi->n_tx_qsets; i++) { struct sge_eth_txq *txq = - &s->ethtxq[pi->first_qset + i]; + &s->ethtxq[pi->first_txqset + i]; eth_stats->q_opackets[i] = txq->stats.pkts; eth_stats->q_obytes[i] = txq->stats.tx_bytes; @@ -734,14 +765,14 @@ static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev) cxgbe_stats_reset(pi); for (i = 0; i < pi->n_rx_qsets; i++) { struct sge_eth_rxq *rxq = - &s->ethrxq[pi->first_qset + i]; + &s->ethrxq[pi->first_rxqset + i]; rxq->stats.pkts = 0; rxq->stats.rx_bytes = 0; } for (i = 0; i < pi->n_tx_qsets; i++) { struct sge_eth_txq *txq = - &s->ethtxq[pi->first_qset + i]; + &s->ethtxq[pi->first_txqset + i]; txq->stats.pkts = 0; txq->stats.tx_bytes = 0; @@ -899,6 +930,69 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev, return 0; } +static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct port_info *pi = dev->data->dev_private; + struct adapter *adapter = pi->adapter; + u16 i, idx, shift, *rss; + int ret; + + if (!(adapter->flags & FULL_INIT_DONE)) + return -ENOMEM; + + if (!reta_size || reta_size > pi->rss_size) + return -EINVAL; + + rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0); + if (!rss) + return -ENOMEM; + + rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16)); + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (!(reta_conf[idx].mask & (1ULL << shift))) + continue; + + rss[i] = reta_conf[idx].reta[shift]; + } + + ret = cxgbe_write_rss(pi, rss); + if (!ret) + rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16)); + + rte_free(rss); + return ret; +} + +static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct port_info *pi = dev->data->dev_private; + struct adapter *adapter = pi->adapter; + u16 i, idx, shift; + + if (!(adapter->flags & FULL_INIT_DONE)) + return -ENOMEM; + + if (!reta_size || reta_size > pi->rss_size) + return -EINVAL; + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (!(reta_conf[idx].mask & (1ULL << shift))) + continue; + + reta_conf[idx].reta[shift] = pi->rss[i]; + } + + return 0; +} + static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev) { RTE_SET_USED(dev); 
@@ -1118,6 +1212,8 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
 	.rss_hash_update = cxgbe_dev_rss_hash_update,
 	.rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
 	.mac_addr_set = cxgbe_mac_addr_set,
+	.reta_update = cxgbe_dev_rss_reta_update,
+	.reta_query = cxgbe_dev_rss_reta_query,
 };
 
 /*
@@ -1166,6 +1262,8 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
 		return 0;
 	}
 
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
 	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
 	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
 	if (!adapter)
@@ -1182,6 +1280,8 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
 	adapter->eth_dev = eth_dev;
 	pi->adapter = adapter;
 
+	cxgbe_process_devargs(adapter);
+
 	err = cxgbe_probe(adapter);
 	if (err) {
 		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
@@ -1198,12 +1298,15 @@ out_free_adapter:
 
 static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 {
-	struct port_info *pi = eth_dev->data->dev_private;
-	struct adapter *adap = pi->adapter;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	uint16_t port_id;
+	int err = 0;
 
 	/* Free up other ports and all resources */
-	cxgbe_close(adap);
-	return 0;
+	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
+		err |= rte_eth_dev_close(port_id);
+
+	return err == 0 ? 0 : -EIO;
 }
 
 static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
@@ -1229,12 +1332,9 @@ RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
 RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
-			      CXGBE_DEVARG_KEEP_OVLAN "=<0|1> "
-			      CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> ");
-
-RTE_INIT(cxgbe_init_log)
-{
-	cxgbe_logtype = rte_log_register("pmd.net.cxgbe");
-	if (cxgbe_logtype >= 0)
-		rte_log_set_level(cxgbe_logtype, RTE_LOG_NOTICE);
-}
+			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
+			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
+			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
+RTE_LOG_REGISTER(cxgbe_logtype, pmd.net.cxgbe, NOTICE);
+RTE_LOG_REGISTER(cxgbe_mbox_logtype, pmd.net.cxgbe.mbox, NOTICE);
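
---
Two usage notes on the hunks above (commentary, not part of the patch):

The cxgbe_xmit_pkts() change adds a look-ahead prefetch: while packet i
is handed to t4_eth_xmit(), the first cache line of packet i + 1's data
is already being pulled in. A minimal sketch of that pattern, using a
hypothetical burst_send() skeleton rather than the driver's real
transmit path:

#include <stdint.h>

#include <rte_branch_prediction.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>

/* Hypothetical burst-transmit skeleton (illustration only): warm the
 * cache for packet i + 1 while packet i is being processed, mirroring
 * the cxgbe_xmit_pkts() change above.
 */
static uint16_t
burst_send(struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t i;

	if (unlikely(nb_pkts == 0))
		return 0;

	/* Prime the pipeline with the first packet's data. */
	rte_prefetch0(rte_pktmbuf_mtod(pkts[0], volatile void *));

	for (i = 0; i < nb_pkts; i++) {
		if (i + 1 < nb_pkts)
			rte_prefetch0(rte_pktmbuf_mtod(pkts[i + 1],
						       volatile void *));
		/* ... write the TX descriptor for pkts[i] here ... */
	}

	return i;
}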
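
The new reta_update/reta_query hooks are reached through the generic
rte_eth_dev_rss_reta_update() and rte_eth_dev_rss_reta_query() ethdev
calls. A minimal sketch, assuming a port that is already configured and
started with RSS enabled; the helper name reta_all_to_queue0() is made
up for illustration. The driver rejects a reta_size of 0 or greater
than pi->rss_size with -EINVAL, per the hunk above.

#include <errno.h>
#include <string.h>

#include <rte_ethdev.h>

/* Illustration only: point every RSS redirection-table entry of a port
 * at queue 0, then read the table back through the PMD's reta_query
 * hook. Assumes reta_size <= ETH_RSS_RETA_SIZE_512.
 */
static int
reta_all_to_queue0(uint16_t port_id)
{
	struct rte_eth_rss_reta_entry64 reta[ETH_RSS_RETA_SIZE_512 /
					     RTE_RETA_GROUP_SIZE];
	struct rte_eth_dev_info info;
	uint16_t i, reta_size;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;

	reta_size = info.reta_size;	/* pi->rss_size for cxgbe */
	if (reta_size == 0 || reta_size > ETH_RSS_RETA_SIZE_512)
		return -EINVAL;

	memset(reta, 0, sizeof(reta));
	for (i = 0; i < reta_size; i++) {
		/* Entries come in groups of 64; each group's mask picks
		 * the entries to update (or, on query, to read back).
		 */
		reta[i / RTE_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_RETA_GROUP_SIZE);
		reta[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] = 0;
	}

	ret = rte_eth_dev_rss_reta_update(port_id, reta, reta_size);
	if (ret != 0)
		return ret;

	/* Same masks, so the query refills the entries just written. */
	return rte_eth_dev_rss_reta_query(port_id, reta, reta_size);
}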