diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index f039364876..51b63ef574 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2014-2017 Chelsio Communications.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Chelsio Communications nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
  */
 
 #include
@@ -64,6 +36,10 @@
 
 #include "cxgbe.h"
 #include "cxgbe_pfvf.h"
+#include "cxgbe_flow.h"
+
+int cxgbe_logtype;
+int cxgbe_mbox_logtype;
 
 /*
  * Macros needed to support the PCI Device ID Table ...
@@ -84,7 +60,7 @@
 /*
  *... and the PCI ID Table itself ...
  */
-#include "t4_pci_id_tbl.h"
+#include "base/t4_pci_id_tbl.h"
 
 uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                          uint16_t nb_pkts)
@@ -92,20 +68,22 @@ uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
         uint16_t pkts_sent, pkts_remain;
         uint16_t total_sent = 0;
+        uint16_t idx = 0;
         int ret = 0;
 
-        CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
-                       __func__, txq, tx_pkts, nb_pkts);
-
         t4_os_lock(&txq->txq_lock);
         /* free up desc from already completed tx */
         reclaim_completed_tx(&txq->q);
+        rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
         while (total_sent < nb_pkts) {
                 pkts_remain = nb_pkts - total_sent;
 
                 for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
-                        ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
-                                          nb_pkts);
+                        idx = total_sent + pkts_sent;
+                        if ((idx + 1) < nb_pkts)
+                                rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
+                                                               volatile void *));
+                        ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
                         if (ret < 0)
                                 break;
                 }
@@ -126,20 +104,16 @@ uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
         struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
         unsigned int work_done;
 
-        CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
-                       __func__, rxq->rspq.cntxt_id, nb_pkts);
-
         if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
                 dev_err(adapter, "error in cxgbe poll\n");
 
-        CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
         return work_done;
 }
 
-void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
                         struct rte_eth_dev_info *device_info)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;
 
@@ -149,8 +123,6 @@ void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
                 .nb_align = 1,
         };
 
-        device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
-
         device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
         device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
         device_info->max_rx_queues = max_queues;
@@ -160,16 +132,11 @@ void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
         device_info->max_vfs = adapter->params.arch.vfcount;
         device_info->max_vmdq_pools = 0; /* XXX: For now no support for VMDQ */
 
-        device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
-                                       DEV_RX_OFFLOAD_IPV4_CKSUM |
-                                       DEV_RX_OFFLOAD_UDP_CKSUM |
-                                       DEV_RX_OFFLOAD_TCP_CKSUM;
+        device_info->rx_queue_offload_capa = 0UL;
+        device_info->rx_offload_capa = CXGBE_RX_OFFLOADS;
 
-        device_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
-                                       DEV_TX_OFFLOAD_IPV4_CKSUM |
-                                       DEV_TX_OFFLOAD_UDP_CKSUM |
-                                       DEV_TX_OFFLOAD_TCP_CKSUM |
-                                       DEV_TX_OFFLOAD_TCP_TSO;
+        device_info->tx_queue_offload_capa = 0UL;
+        device_info->tx_offload_capa = CXGBE_TX_OFFLOADS;
 
         device_info->reta_size = pi->rss_size;
         device_info->hash_key_size = CXGBE_DEFAULT_RSS_KEY_LEN;
@@ -178,88 +145,167 @@ void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
         device_info->rx_desc_lim = cxgbe_desc_lim;
         device_info->tx_desc_lim = cxgbe_desc_lim;
         cxgbe_get_speed_caps(pi, &device_info->speed_capa);
+
+        return 0;
 }
 
-void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
 
-        t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-                      1, -1, 1, -1, false);
+        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+                             1, -1, 1, -1, false);
 }
 
-void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
 
-        t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-                      0, -1, 1, -1, false);
+        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+                             0, -1, 1, -1, false);
 }
 
-void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
 
         /* TODO: address filters ?? */
-        t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-                      -1, 1, 1, -1, false);
+        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+                             -1, 1, 1, -1, false);
 }
 
-void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
 
         /* TODO: address filters ?? */
-        t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-                      -1, 0, 1, -1, false);
+        return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+                             -1, 0, 1, -1, false);
 }
 
 int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
-                          __rte_unused int wait_to_complete)
+                          int wait_to_complete)
+{
+        struct port_info *pi = eth_dev->data->dev_private;
+        struct adapter *adapter = pi->adapter;
+        struct sge *s = &adapter->sge;
+        struct rte_eth_link new_link = { 0 };
+        unsigned int i, work_done, budget = 32;
+        u8 old_link = pi->link_cfg.link_ok;
+
+        for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
+                if (!s->fw_evtq.desc)
+                        break;
+
+                cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+                /* Exit if link status changed or always forced up */
+                if (pi->link_cfg.link_ok != old_link ||
+                    cxgbe_force_linkup(adapter))
+                        break;
+
+                if (!wait_to_complete)
+                        break;
+
+                rte_delay_ms(CXGBE_LINK_STATUS_POLL_MS);
+        }
+
+        new_link.link_status = cxgbe_force_linkup(adapter) ?
+                               ETH_LINK_UP : pi->link_cfg.link_ok;
+        new_link.link_autoneg = pi->link_cfg.autoneg;
+        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+        new_link.link_speed = pi->link_cfg.speed;
+
+        return rte_eth_linkstatus_set(eth_dev, &new_link);
+}
+
+/**
+ * Set device link up.
+ */
+int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
+{
+        struct port_info *pi = dev->data->dev_private;
+        struct adapter *adapter = pi->adapter;
+        unsigned int work_done, budget = 32;
+        struct sge *s = &adapter->sge;
+        int ret;
+
+        if (!s->fw_evtq.desc)
+                return -ENOMEM;
+
+        /* Flush all link events */
+        cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
+
+        /* If link already up, nothing to do */
+        if (pi->link_cfg.link_ok)
+                return 0;
+
+        ret = cxgbe_set_link_status(pi, true);
+        if (ret)
+                return ret;
+
+        cxgbe_dev_link_update(dev, 1);
+        return 0;
+}
+
+/**
+ * Set device link down.
+ */
+int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
+        unsigned int work_done, budget = 32;
         struct sge *s = &adapter->sge;
-        struct rte_eth_link *old_link = &eth_dev->data->dev_link;
-        unsigned int work_done, budget = 4;
+        int ret;
 
+        if (!s->fw_evtq.desc)
+                return -ENOMEM;
+
+        /* Flush all link events */
         cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
-        if (old_link->link_status == pi->link_cfg.link_ok)
-                return -1;  /* link not changed */
 
-        eth_dev->data->dev_link.link_status = pi->link_cfg.link_ok;
-        eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-        eth_dev->data->dev_link.link_speed = pi->link_cfg.speed;
+        /* If link already down, nothing to do */
+        if (!pi->link_cfg.link_ok)
+                return 0;
+
+        ret = cxgbe_set_link_status(pi, false);
+        if (ret)
+                return ret;
 
-        /* link has changed */
+        cxgbe_dev_link_update(dev, 0);
         return 0;
 }
 
 int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         struct rte_eth_dev_info dev_info;
         int err;
-        uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+        uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 
-        cxgbe_dev_info_get(eth_dev, &dev_info);
+        err = cxgbe_dev_info_get(eth_dev, &dev_info);
+        if (err != 0)
+                return err;
 
-        /* Must accommodate at least ETHER_MIN_MTU */
-        if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
+        /* Must accommodate at least RTE_ETHER_MIN_MTU */
+        if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
                 return -EINVAL;
 
         /* set to jumbo mode if needed */
-        if (new_mtu > ETHER_MAX_LEN)
-                eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+        if (new_mtu > RTE_ETHER_MAX_LEN)
+                eth_dev->data->dev_conf.rxmode.offloads |=
+                        DEV_RX_OFFLOAD_JUMBO_FRAME;
         else
-                eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+                eth_dev->data->dev_conf.rxmode.offloads &=
+                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
         err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
                             -1, -1, true);
@@ -274,9 +320,8 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
  */
 void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
-        int i, dev_down = 0;
 
         CXGBE_FUNC_TRACE();
 
@@ -290,22 +335,6 @@ void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
          * have been disabled
          */
         t4_sge_eth_clear_queues(pi);
-
-        /* See if all ports are down */
-        for_each_port(adapter, i) {
-                pi = adap2pinfo(adapter, i);
-                /*
-                 * Skip first port of the adapter since it will be closed
-                 * by DPDK
-                 */
-                if (i == 0)
-                        continue;
-                dev_down += (pi->eth_dev->data->dev_started == 0) ? 1 : 0;
-        }
-
-        /* If rest of the ports are stopped, then free up resources */
-        if (dev_down == (adapter->params.nports - 1))
-                cxgbe_close(adapter);
 }
 
 /* Start the device.
@@ -313,7 +342,8 @@ void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
  */
 int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
+        struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
         struct adapter *adapter = pi->adapter;
         int err = 0, i;
 
@@ -334,9 +364,14 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
                 goto out;
         }
 
+        if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+                eth_dev->data->scattered_rx = 1;
+        else
+                eth_dev->data->scattered_rx = 0;
+
         cxgbe_enable_rx_queues(pi);
 
-        err = setup_rss(pi);
+        err = cxgbe_setup_rss(pi);
         if (err)
                 goto out;
 
@@ -352,7 +387,7 @@ int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
                 goto out;
         }
 
-        err = link_start(pi);
+        err = cxgbe_link_start(pi);
         if (err)
                 goto out;
 
@@ -365,7 +400,7 @@ out:
  */
 void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
 
         CXGBE_FUNC_TRACE();
@@ -380,24 +415,34 @@ void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
          * have been disabled
          */
         t4_sge_eth_clear_queues(pi);
+        eth_dev->data->scattered_rx = 0;
 }
 
 int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         int err;
 
         CXGBE_FUNC_TRACE();
 
+        if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+                eth_dev->data->dev_conf.rxmode.offloads |=
+                        DEV_RX_OFFLOAD_RSS_HASH;
+
         if (!(adapter->flags & FW_QUEUE_BOUND)) {
-                err = setup_sge_fwevtq(adapter);
+                err = cxgbe_setup_sge_fwevtq(adapter);
                 if (err)
                         return err;
                 adapter->flags |= FW_QUEUE_BOUND;
+
+                if (is_pf4(adapter)) {
+                        err = cxgbe_setup_sge_ctrl_txq(adapter);
+                        if (err)
+                                return err;
+                }
         }
 
-        err = cfg_queue_count(eth_dev);
+        err = cxgbe_cfg_queue_count(eth_dev);
         if (err)
                 return err;
 
@@ -437,17 +482,15 @@ int cxgbe_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
 int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
                              uint16_t queue_idx, uint16_t nb_desc,
                              unsigned int socket_id,
-                             const struct rte_eth_txconf *tx_conf)
+                             const struct rte_eth_txconf *tx_conf __rte_unused)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         struct sge *s = &adapter->sge;
         struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
         int err = 0;
         unsigned int temp_nb_desc;
 
-        RTE_SET_USED(tx_conf);
-
         dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
                   __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
                   socket_id, pi->first_qset);
@@ -506,7 +549,7 @@ void cxgbe_dev_tx_queue_release(void *q)
 int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
         int ret;
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adap = pi->adapter;
         struct sge_rspq *q;
 
@@ -525,7 +568,7 @@ int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
         int ret;
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adap = pi->adapter;
         struct sge_rspq *q;
 
@@ -543,10 +586,10 @@ int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                              uint16_t queue_idx, uint16_t nb_desc,
                              unsigned int socket_id,
-                             const struct rte_eth_rxconf *rx_conf,
+                             const struct rte_eth_rxconf *rx_conf __rte_unused,
                              struct rte_mempool *mp)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         struct sge *s = &adapter->sge;
         struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
@@ -556,15 +599,18 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
         struct rte_eth_dev_info dev_info;
         unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 
-        RTE_SET_USED(rx_conf);
-
         dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
                   __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
                   socket_id, mp);
 
-        cxgbe_dev_info_get(eth_dev, &dev_info);
+        err = cxgbe_dev_info_get(eth_dev, &dev_info);
+        if (err != 0) {
+                dev_err(adap, "%s: error during getting ethernet device info",
+                        __func__);
+                return err;
+        }
 
-        /* Must accommodate at least ETHER_MIN_MTU */
+        /* Must accommodate at least RTE_ETHER_MIN_MTU */
         if ((pkt_len < dev_info.min_rx_bufsize) ||
             (pkt_len > dev_info.max_rx_pktlen)) {
                 dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
@@ -603,13 +649,15 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
         rxq->fl.size = temp_nb_desc;
 
         /* Set to jumbo mode if necessary */
-        if (pkt_len > ETHER_MAX_LEN)
-                eth_dev->data->dev_conf.rxmode.jumbo_frame = 1;
+        if (pkt_len > RTE_ETHER_MAX_LEN)
+                eth_dev->data->dev_conf.rxmode.offloads |=
+                        DEV_RX_OFFLOAD_JUMBO_FRAME;
         else
-                eth_dev->data->dev_conf.rxmode.jumbo_frame = 0;
+                eth_dev->data->dev_conf.rxmode.offloads &=
+                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
         err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
-                               &rxq->fl, t4_ethrx_handler,
+                               &rxq->fl, NULL,
                                is_pf4(adapter) ?
                                t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
                                queue_idx, socket_id);
@@ -643,7 +691,7 @@ void cxgbe_dev_rx_queue_release(void *q)
 static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
                                struct rte_eth_stats *eth_stats)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         struct sge *s = &adapter->sge;
         struct port_stats ps;
@@ -681,7 +729,6 @@ static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
 
                 eth_stats->q_opackets[i] = txq->stats.pkts;
                 eth_stats->q_obytes[i] = txq->stats.tx_bytes;
-                eth_stats->q_errors[i] = txq->stats.mapping_err;
         }
         return 0;
 }
@@ -689,9 +736,9 @@ static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
 /*
  * Reset port statistics.
  */
-static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
+static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         struct sge *s = &adapter->sge;
         unsigned int i;
@@ -712,12 +759,14 @@ static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
                 txq->stats.tx_bytes = 0;
                 txq->stats.mapping_err = 0;
         }
+
+        return 0;
 }
 
 static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
                                struct rte_eth_fc_conf *fc_conf)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct link_config *lc = &pi->link_cfg;
         int rx_pause, tx_pause;
 
@@ -739,7 +788,7 @@ static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                                struct rte_eth_fc_conf *fc_conf)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         struct link_config *lc = &pi->link_cfg;
 
@@ -785,7 +834,7 @@ cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev)
 static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
                                      struct rte_eth_rss_conf *rss_conf)
 {
-        struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+        struct port_info *pi = dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         int err;
 
@@ -815,7 +864,7 @@ static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                                        struct rte_eth_rss_conf *rss_conf)
 {
-        struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+        struct port_info *pi = dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         u64 rss_hf = 0;
         u64 flags = 0;
@@ -828,13 +877,13 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
                 return err;
 
         if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) {
-                rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
+                rss_hf |= CXGBE_RSS_HF_TCP_IPV6_MASK;
                 if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
-                        rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
+                        rss_hf |= CXGBE_RSS_HF_UDP_IPV6_MASK;
         }
 
         if (flags & F_FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
-                rss_hf |= ETH_RSS_IPV6;
+                rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
 
         if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
                 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
@@ -843,7 +892,7 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
         }
 
         if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
-                rss_hf |= ETH_RSS_IPV4;
+                rss_hf |= CXGBE_RSS_HF_IPV4_MASK;
 
         rss_conf->rss_hf = rss_hf;
 
@@ -924,7 +973,7 @@ static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
 static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
                             struct rte_dev_eeprom_info *e)
 {
-        struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+        struct port_info *pi = dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         u32 i, err = 0;
         u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);
@@ -945,7 +994,7 @@ static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
 static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
                             struct rte_dev_eeprom_info *eeprom)
 {
-        struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+        struct port_info *pi = dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
         u8 *buf;
         int err = 0;
@@ -1003,7 +1052,7 @@ out:
 
 static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
 
         return t4_get_regs_len(adapter) / sizeof(uint32_t);
@@ -1012,7 +1061,7 @@ static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
 static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
                           struct rte_dev_reg_info *regs)
 {
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = pi->adapter;
 
         regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
@@ -1031,6 +1080,21 @@ static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
         return 0;
 }
 
+int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
+{
+        struct port_info *pi = dev->data->dev_private;
+        int ret;
+
+        ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
+        if (ret < 0) {
+                dev_err(adapter, "failed to set mac addr; err = %d\n",
+                        ret);
+                return ret;
+        }
+        pi->xact_addr_filt = ret;
+        return 0;
+}
+
 static const struct eth_dev_ops cxgbe_eth_dev_ops = {
         .dev_start = cxgbe_dev_start,
         .dev_stop = cxgbe_dev_stop,
@@ -1043,6 +1107,8 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
         .dev_infos_get = cxgbe_dev_info_get,
         .dev_supported_ptypes_get = cxgbe_dev_supported_ptypes_get,
         .link_update = cxgbe_dev_link_update,
+        .dev_set_link_up = cxgbe_dev_set_link_up,
+        .dev_set_link_down = cxgbe_dev_set_link_down,
         .mtu_set = cxgbe_dev_mtu_set,
         .tx_queue_setup = cxgbe_dev_tx_queue_setup,
         .tx_queue_start = cxgbe_dev_tx_queue_start,
@@ -1052,6 +1118,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
         .rx_queue_start = cxgbe_dev_rx_queue_start,
         .rx_queue_stop = cxgbe_dev_rx_queue_stop,
         .rx_queue_release = cxgbe_dev_rx_queue_release,
+        .filter_ctrl = cxgbe_dev_filter_ctrl,
         .stats_get = cxgbe_dev_stats_get,
         .stats_reset = cxgbe_dev_stats_reset,
         .flow_ctrl_get = cxgbe_flow_ctrl_get,
@@ -1062,6 +1129,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
         .get_reg = cxgbe_get_regs,
         .rss_hash_update = cxgbe_dev_rss_hash_update,
         .rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
+        .mac_addr_set = cxgbe_mac_addr_set,
 };
 
 /*
@@ -1071,7 +1139,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
 static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
 {
         struct rte_pci_device *pci_dev;
-        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+        struct port_info *pi = eth_dev->data->dev_private;
         struct adapter *adapter = NULL;
         char name[RTE_ETH_NAME_MAX_LEN];
         int err = 0;
@@ -1104,6 +1172,7 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
                                 eth_dev->rx_pkt_burst;
                         rest_eth_dev->tx_pkt_burst =
                                 eth_dev->tx_pkt_burst;
+                        rte_eth_dev_probing_finish(rest_eth_dev);
                 }
         }
         return 0;
@@ -1125,6 +1194,8 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
         adapter->eth_dev = eth_dev;
         pi->adapter = adapter;
 
+        cxgbe_process_devargs(adapter);
+
         err = cxgbe_probe(adapter);
         if (err) {
                 dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
@@ -1139,6 +1210,16 @@ out_free_adapter:
         return err;
 }
 
+static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+        struct port_info *pi = eth_dev->data->dev_private;
+        struct adapter *adap = pi->adapter;
+
+        /* Free up other ports and all resources */
+        cxgbe_close(adap);
+        return 0;
+}
+
 static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                                struct rte_pci_device *pci_dev)
 {
@@ -1148,7 +1229,7 @@ static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 static int eth_cxgbe_pci_remove(struct rte_pci_device *pci_dev)
 {
-        return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+        return rte_eth_dev_pci_generic_remove(pci_dev, eth_cxgbe_dev_uninit);
 }
 
 static struct rte_pci_driver rte_cxgbe_pmd = {
@@ -1161,3 +1242,16 @@ static struct rte_pci_driver rte_cxgbe_pmd = {
 RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
 RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
+                              CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+                              CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> ");
+
+RTE_INIT(cxgbe_init_log)
+{
+        cxgbe_logtype = rte_log_register("pmd.net.cxgbe");
+        if (cxgbe_logtype >= 0)
+                rte_log_set_level(cxgbe_logtype, RTE_LOG_NOTICE);
+        cxgbe_mbox_logtype = rte_log_register("pmd.net.cxgbe.mbox");
+        if (cxgbe_mbox_logtype >= 0)
+                rte_log_set_level(cxgbe_mbox_logtype, RTE_LOG_NOTICE);
+}