X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcxgbe%2Fcxgbe_ethdev.c;h=1deee2f5c82fb9c85cc5aa1d3669ea3feb22cafe;hb=a3147ae9aff9d7fd8644083d7d9f87ba9cabc524;hp=3a373ec1daa0b2a1e38f9e5c9162eaeb8e656f13;hpb=63a97e588b1f7a9d9533153058276af6e9abd2be;p=dpdk.git

diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 3a373ec1da..1deee2f5c8 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -38,6 +38,9 @@
 #include "cxgbe_pfvf.h"
 #include "cxgbe_flow.h"
 
+int cxgbe_logtype;
+int cxgbe_mbox_logtype;
+
 /*
  * Macros needed to support the PCI Device ID Table ...
  */
@@ -65,20 +68,22 @@ uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
 	uint16_t pkts_sent, pkts_remain;
 	uint16_t total_sent = 0;
+	uint16_t idx = 0;
 	int ret = 0;
 
-	CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
-		       __func__, txq, tx_pkts, nb_pkts);
-
 	t4_os_lock(&txq->txq_lock);
 	/* free up desc from already completed tx */
 	reclaim_completed_tx(&txq->q);
+	rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
 	while (total_sent < nb_pkts) {
 		pkts_remain = nb_pkts - total_sent;
 
 		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
-			ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
-					  nb_pkts);
+			idx = total_sent + pkts_sent;
+			if ((idx + 1) < nb_pkts)
+				rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
+							       volatile void *));
+			ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
 			if (ret < 0)
 				break;
 		}
@@ -99,17 +104,13 @@ uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
 	unsigned int work_done;
 
-	CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
-		       __func__, rxq->rspq.cntxt_id, nb_pkts);
-
 	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
 		dev_err(adapter, "error in cxgbe poll\n");
 
-	CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
 	return work_done;
 }
 
-void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
 			struct rte_eth_dev_info *device_info)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
@@ -144,46 +145,48 @@ void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
 	device_info->rx_desc_lim = cxgbe_desc_lim;
 	device_info->tx_desc_lim = cxgbe_desc_lim;
 	cxgbe_get_speed_caps(pi, &device_info->speed_capa);
+
+	return 0;
 }
 
-void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
 
-	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-		      1, -1, 1, -1, false);
+	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+			     1, -1, 1, -1, false);
 }
 
-void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
 
-	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-		      0, -1, 1, -1, false);
+	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+			     0, -1, 1, -1, false);
 }
 
-void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
 
 	/* TODO: address filters ?? */
-	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-		      -1, 1, 1, -1, false);
+	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+			     -1, 1, 1, -1, false);
}
 
-void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
 
 	/* TODO: address filters ?? */
-	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-		      -1, 0, 1, -1, false);
+	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+			     -1, 0, 1, -1, false);
 }
 
 int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
@@ -197,6 +200,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
 	u8 old_link = pi->link_cfg.link_ok;
 
 	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
+		if (!s->fw_evtq.desc)
+			break;
+
 		cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
 
 		/* Exit if link status changed or always forced up */
@@ -230,6 +236,9 @@ int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
 	struct sge *s = &adapter->sge;
 	int ret;
 
+	if (!s->fw_evtq.desc)
+		return -ENOMEM;
+
 	/* Flush all link events */
 	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
 
@@ -256,6 +265,9 @@ int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
 	struct sge *s = &adapter->sge;
 	int ret;
 
+	if (!s->fw_evtq.desc)
+		return -ENOMEM;
+
 	/* Flush all link events */
 	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
 
@@ -279,7 +291,9 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	int err;
 	uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 
-	cxgbe_dev_info_get(eth_dev, &dev_info);
+	err = cxgbe_dev_info_get(eth_dev, &dev_info);
+	if (err != 0)
+		return err;
 
 	/* Must accommodate at least RTE_ETHER_MIN_MTU */
 	if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
@@ -412,6 +426,10 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 
 	CXGBE_FUNC_TRACE();
 
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+		eth_dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_RSS_HASH;
+
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
 		err = cxgbe_setup_sge_fwevtq(adapter);
 		if (err)
@@ -585,7 +603,12 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
 		  socket_id, mp);
 
-	cxgbe_dev_info_get(eth_dev, &dev_info);
+	err = cxgbe_dev_info_get(eth_dev, &dev_info);
+	if (err != 0) {
+		dev_err(adap, "%s: error during getting ethernet device info",
+			__func__);
+		return err;
+	}
 
 	/* Must accommodate at least RTE_ETHER_MIN_MTU */
 	if ((pkt_len < dev_info.min_rx_bufsize) ||
@@ -713,7 +736,7 @@ static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
 /*
  * Reset port statistics.
  */
-static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
+static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
@@ -736,6 +759,8 @@ static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
 		txq->stats.tx_bytes = 0;
 		txq->stats.mapping_err = 0;
 	}
+
+	return 0;
 }
 
 static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
@@ -1169,6 +1194,8 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
 	adapter->eth_dev = eth_dev;
 	pi->adapter = adapter;
 
+	cxgbe_process_devargs(adapter);
+
 	err = cxgbe_probe(adapter);
 	if (err) {
 		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
@@ -1216,5 +1243,17 @@ RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
 RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
-			      CXGBE_DEVARG_KEEP_OVLAN "=<0|1> "
-			      CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> ");
+			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
+			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
+			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
+
+RTE_INIT(cxgbe_init_log)
+{
+	cxgbe_logtype = rte_log_register("pmd.net.cxgbe");
+	if (cxgbe_logtype >= 0)
+		rte_log_set_level(cxgbe_logtype, RTE_LOG_NOTICE);
+	cxgbe_mbox_logtype = rte_log_register("pmd.net.cxgbe.mbox");
+	if (cxgbe_mbox_logtype >= 0)
+		rte_log_set_level(cxgbe_mbox_logtype, RTE_LOG_NOTICE);
+}
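
Two caller-side notes on the changes above. First, converting the eth_dev ops from void to int (info get, promiscuous/allmulticast toggles, stats reset) lets t4_set_rxmode() and other hardware failures propagate up through the generic ethdev layer instead of being silently dropped. A minimal sketch of what that buys an application, assuming a DPDK release (19.11+) where rte_eth_promiscuous_enable() returns int; enable_promisc() and the port id handling are hypothetical, not part of this patch:

/* Hypothetical helper: with the PMD callbacks above returning int,
 * a t4_set_rxmode() failure now surfaces to the caller through
 * rte_eth_promiscuous_enable() rather than vanishing in the PMD.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static int enable_promisc(uint16_t port_id)
{
	int ret = rte_eth_promiscuous_enable(port_id);

	if (ret != 0)
		fprintf(stderr, "port %u: promiscuous enable failed: %d\n",
			port_id, ret);
	return ret;
}

Second, the RTE_INIT constructor registers the dynamic log types "pmd.net.cxgbe" and "pmd.net.cxgbe.mbox" at NOTICE level. A sketch of raising both to DEBUG from application code, assuming rte_log_set_level_pattern() is available (DPDK 18.05+); the same effect is available from the EAL command line, e.g. --log-level=pmd.net.cxgbe,8:

/* Hypothetical snippet: raise both cxgbe log types registered in
 * cxgbe_init_log() to DEBUG after EAL initialization.
 */
#include <rte_eal.h>
#include <rte_log.h>

int main(int argc, char **argv)
{
	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Glob matches "pmd.net.cxgbe" and "pmd.net.cxgbe.mbox". */
	rte_log_set_level_pattern("pmd.net.cxgbe*", RTE_LOG_DEBUG);

	return 0;
}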