#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
uint16_t pkts_sent, pkts_remain;
uint16_t total_sent = 0;
+ uint16_t idx = 0;
int ret = 0;
- CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
- __func__, txq, tx_pkts, nb_pkts);
-
t4_os_lock(&txq->txq_lock);
/* free up desc from already completed tx */
reclaim_completed_tx(&txq->q);
+ if (unlikely(!nb_pkts))
+ goto out_unlock;
+
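+ /* Warm the cache with the first packet's header before entering the send loop */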
+ rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
while (total_sent < nb_pkts) {
pkts_remain = nb_pkts - total_sent;
for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
- ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
- nb_pkts);
+ idx = total_sent + pkts_sent;
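+ /* Prefetch the next packet's header while the current one is being queued */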
+ if ((idx + 1) < nb_pkts)
+ rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
+ volatile void *));
+ ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
if (ret < 0)
break;
}
if (!pkts_sent)
break;
total_sent += pkts_sent;
/* reclaim as much as possible */
reclaim_completed_tx(&txq->q);
}
+out_unlock:
t4_os_unlock(&txq->txq_lock);
return total_sent;
}
struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
unsigned int work_done;
- CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
- __func__, rxq->rspq.cntxt_id, nb_pkts);
-
if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
dev_err(adapter, "error in cxgbe poll\n");
- CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
return work_done;
}
-void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *device_info)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
- int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;
static const struct rte_eth_desc_lim cxgbe_desc_lim = {
.nb_max = CXGBE_MAX_RING_DESC_SIZE,
.nb_min = CXGBE_MIN_RING_DESC_SIZE,
.nb_align = 1,
};
device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
- device_info->max_rx_queues = max_queues;
- device_info->max_tx_queues = max_queues;
+ device_info->max_rx_queues = adapter->sge.max_ethqsets;
+ device_info->max_tx_queues = adapter->sge.max_ethqsets;
device_info->max_mac_addrs = 1;
/* XXX: For now we support one MAC/port */
device_info->max_vfs = adapter->params.arch.vfcount;
device_info->rx_desc_lim = cxgbe_desc_lim;
device_info->tx_desc_lim = cxgbe_desc_lim;
cxgbe_get_speed_caps(pi, &device_info->speed_capa);
+
+ return 0;
}
-void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
- t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
- 1, -1, 1, -1, false);
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ 1, -1, 1, -1, false);
}
-void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
- t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
- 0, -1, 1, -1, false);
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ 0, -1, 1, -1, false);
}
-void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
/* TODO: address filters ?? */
- t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
- -1, 1, 1, -1, false);
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ -1, 1, 1, -1, false);
}
-void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
/* TODO: address filters ?? */
- t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
- -1, 0, 1, -1, false);
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+ -1, 0, 1, -1, false);
}
int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
int wait_to_complete)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
+ unsigned int i, work_done, budget = 32;
+ struct link_config *lc = &pi->link_cfg;
struct adapter *adapter = pi->adapter;
- struct sge *s = &adapter->sge;
struct rte_eth_link new_link = { 0 };
- unsigned int i, work_done, budget = 32;
u8 old_link = pi->link_cfg.link_ok;
+ struct sge *s = &adapter->sge;
for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
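+ /* The firmware event queue does not exist before dev_start; skip polling */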
+ if (!s->fw_evtq.desc)
+ break;
+
cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
/* Exit if link status changed or always forced up */
if (pi->link_cfg.link_ok != old_link ||
cxgbe_force_linkup(adapter))
break;
}
new_link.link_status = cxgbe_force_linkup(adapter) ?
ETH_LINK_UP : pi->link_cfg.link_ok;
- new_link.link_autoneg = pi->link_cfg.autoneg;
+ new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
- new_link.link_speed = pi->link_cfg.speed;
+ new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
return rte_eth_linkstatus_set(eth_dev, &new_link);
}
/*
 * Set device link up.
 */
int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
{
- struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct port_info *pi = dev->data->dev_private;
struct adapter *adapter = pi->adapter;
unsigned int work_done, budget = 32;
struct sge *s = &adapter->sge;
int ret;
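+ /* Link control polls the firmware event queue, so it must be allocated */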
+ if (!s->fw_evtq.desc)
+ return -ENOMEM;
+
/* Flush all link events */
cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
/*
 * Set device link down.
 */
int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
{
- struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct port_info *pi = dev->data->dev_private;
struct adapter *adapter = pi->adapter;
unsigned int work_done, budget = 32;
struct sge *s = &adapter->sge;
int ret;
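+ /* The firmware event queue must be allocated before we can poll it */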
+ if (!s->fw_evtq.desc)
+ return -ENOMEM;
+
/* Flush all link events */
cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
struct rte_eth_dev_info dev_info;
int err;
- uint16_t new_mtu = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
- cxgbe_dev_info_get(eth_dev, &dev_info);
+ err = cxgbe_dev_info_get(eth_dev, &dev_info);
+ if (err != 0)
+ return err;
- /* Must accommodate at least ETHER_MIN_MTU */
- if ((new_mtu < ETHER_MIN_MTU) || (new_mtu > dev_info.max_rx_pktlen))
+ /* Must accommodate at least RTE_ETHER_MIN_MTU */
+ if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
return -EINVAL;
/* set to jumbo mode if needed */
- if (new_mtu > ETHER_MAX_LEN)
+ if (new_mtu > CXGBE_ETH_MAX_LEN)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
eth_dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
/*
 * Close device.
 */
-void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
+ u8 i;
CXGBE_FUNC_TRACE();
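+ /* Only the primary process may free resources shared across processes */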
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
if (!(adapter->flags & FULL_INIT_DONE))
- return;
+ return 0;
+
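+ /* A zero viid means this port has already been closed */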
+ if (!pi->viid)
+ return 0;
cxgbe_down(pi);
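+ /* Release this port's queues and free its virtual interface */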
+ t4_sge_eth_release_queues(pi);
+ t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
+ pi->viid = 0;
- /*
- * We clear queues only if both tx and rx path of the port
- * have been disabled
+ /* Free up the adapter-wide resources only after all the ports
+ * under this PF have been closed.
*/
- t4_sge_eth_clear_queues(pi);
+ for_each_port(adapter, i) {
+ temp_pi = adap2pinfo(adapter, i);
+ if (temp_pi->viid)
+ return 0;
+ }
+
+ cxgbe_close(adapter);
+ rte_free(adapter);
+
+ return 0;
}
/* Start the device.
*/
int cxgbe_dev_start(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct rte_eth_rxmode *rx_conf = &eth_dev->data->dev_conf.rxmode;
struct adapter *adapter = pi->adapter;
int err = 0, i;
/*
* Stop device: disable rx and tx functions to allow for reconfiguring.
*/
-void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
CXGBE_FUNC_TRACE();
if (!(adapter->flags & FULL_INIT_DONE))
- return;
+ return 0;
cxgbe_down(pi);
/*
 * We clear queues only if both tx and rx path of the port
 * have been disabled
 */
t4_sge_eth_clear_queues(pi);
eth_dev->data->scattered_rx = 0;
+
+ return 0;
}
int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
int err;
CXGBE_FUNC_TRACE();
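+ /* The hardware always supplies the RSS hash when RSS is enabled */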
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_RSS_HASH;
+
if (!(adapter->flags & FW_QUEUE_BOUND)) {
err = cxgbe_setup_sge_fwevtq(adapter);
if (err)
unsigned int socket_id,
const struct rte_eth_txconf *tx_conf __rte_unused)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
struct sge *s = &adapter->sge;
- struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
- int err = 0;
unsigned int temp_nb_desc;
+ struct sge_eth_txq *txq;
+ int err = 0;
+ txq = &s->ethtxq[pi->first_txqset + queue_idx];
dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
__func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
- socket_id, pi->first_qset);
+ socket_id, pi->first_txqset);
/* Free up the existing queue */
if (eth_dev->data->tx_queues[queue_idx]) {
int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
- int ret;
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adap = pi->adapter;
- struct sge_rspq *q;
+ struct sge_eth_rxq *rxq;
+ int ret;
dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
__func__, pi->port_id, rx_queue_id);
- q = eth_dev->data->rx_queues[rx_queue_id];
-
- ret = t4_sge_eth_rxq_start(adap, q);
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ ret = t4_sge_eth_rxq_start(adap, rxq);
if (ret == 0)
eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
- int ret;
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adap = pi->adapter;
- struct sge_rspq *q;
+ struct sge_eth_rxq *rxq;
+ int ret;
dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
__func__, pi->port_id, rx_queue_id);
- q = eth_dev->data->rx_queues[rx_queue_id];
- ret = t4_sge_eth_rxq_stop(adap, q);
+ rxq = eth_dev->data->rx_queues[rx_queue_id];
+ ret = t4_sge_eth_rxq_stop(adap, rxq);
if (ret == 0)
eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mp)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
+ struct rte_eth_dev_info dev_info;
struct sge *s = &adapter->sge;
- struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
- int err = 0;
- int msi_idx = 0;
unsigned int temp_nb_desc;
- struct rte_eth_dev_info dev_info;
- unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ int err = 0, msi_idx = 0;
+ struct sge_eth_rxq *rxq;
+ rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
__func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
socket_id, mp);
- cxgbe_dev_info_get(eth_dev, &dev_info);
+ err = cxgbe_dev_info_get(eth_dev, &dev_info);
+ if (err != 0) {
+ dev_err(adap, "%s: error during getting ethernet device info",
+ __func__);
+ return err;
+ }
- /* Must accommodate at least ETHER_MIN_MTU */
+ /* Must accommodate at least RTE_ETHER_MIN_MTU */
if ((pkt_len < dev_info.min_rx_bufsize) ||
(pkt_len > dev_info.max_rx_pktlen)) {
dev_err(adap, "%s: max pkt len must be > %d and <= %d\n",
rxq->fl.size = temp_nb_desc;
/* Set to jumbo mode if necessary */
- if (pkt_len > ETHER_MAX_LEN)
+ if (pkt_len > CXGBE_ETH_MAX_LEN)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
eth_dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
void cxgbe_dev_rx_queue_release(void *q)
{
struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
- struct sge_rspq *rq = &rxq->rspq;
- if (rq) {
+ if (rxq) {
struct port_info *pi = (struct port_info *)
- (rq->eth_dev->data->dev_private);
+ (rxq->rspq.eth_dev->data->dev_private);
struct adapter *adap = pi->adapter;
dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
struct rte_eth_stats *eth_stats)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
struct sge *s = &adapter->sge;
struct port_stats ps;
for (i = 0; i < pi->n_rx_qsets; i++) {
struct sge_eth_rxq *rxq =
- &s->ethrxq[pi->first_qset + i];
+ &s->ethrxq[pi->first_rxqset + i];
eth_stats->q_ipackets[i] = rxq->stats.pkts;
eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
for (i = 0; i < pi->n_tx_qsets; i++) {
struct sge_eth_txq *txq =
- &s->ethtxq[pi->first_qset + i];
+ &s->ethtxq[pi->first_txqset + i];
eth_stats->q_opackets[i] = txq->stats.pkts;
eth_stats->q_obytes[i] = txq->stats.tx_bytes;
- eth_stats->q_errors[i] = txq->stats.mapping_err;
}
return 0;
}
/*
* Reset port statistics.
*/
-static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
+static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
struct sge *s = &adapter->sge;
unsigned int i;
cxgbe_stats_reset(pi);
for (i = 0; i < pi->n_rx_qsets; i++) {
struct sge_eth_rxq *rxq =
- &s->ethrxq[pi->first_qset + i];
+ &s->ethrxq[pi->first_rxqset + i];
rxq->stats.pkts = 0;
rxq->stats.rx_bytes = 0;
}
for (i = 0; i < pi->n_tx_qsets; i++) {
struct sge_eth_txq *txq =
- &s->ethtxq[pi->first_qset + i];
+ &s->ethtxq[pi->first_txqset + i];
txq->stats.pkts = 0;
txq->stats.tx_bytes = 0;
txq->stats.mapping_err = 0;
}
+
+ return 0;
}
static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
struct rte_eth_fc_conf *fc_conf)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct link_config *lc = &pi->link_cfg;
- int rx_pause, tx_pause;
+ u8 rx_pause = 0, tx_pause = 0;
+ u32 caps = lc->link_caps;
+
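+ /* Derive the reported pause settings from the current link capabilities */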
+ if (caps & FW_PORT_CAP32_ANEG)
+ fc_conf->autoneg = 1;
- fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
- rx_pause = lc->fc & PAUSE_RX;
- tx_pause = lc->fc & PAUSE_TX;
+ if (caps & FW_PORT_CAP32_FC_TX)
+ tx_pause = 1;
+
+ if (caps & FW_PORT_CAP32_FC_RX)
+ rx_pause = 1;
if (rx_pause && tx_pause)
fc_conf->mode = RTE_FC_FULL;
static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
struct rte_eth_fc_conf *fc_conf)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
- struct adapter *adapter = pi->adapter;
+ struct port_info *pi = eth_dev->data->dev_private;
struct link_config *lc = &pi->link_cfg;
+ u32 new_caps = lc->admin_caps;
+ u8 tx_pause = 0, rx_pause = 0;
+ int ret;
- if (lc->pcaps & FW_PORT_CAP32_ANEG) {
- if (fc_conf->autoneg)
- lc->requested_fc |= PAUSE_AUTONEG;
- else
- lc->requested_fc &= ~PAUSE_AUTONEG;
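+ /* Map the requested flow-control mode onto discrete Tx/Rx pause flags */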
+ if (fc_conf->mode == RTE_FC_FULL) {
+ tx_pause = 1;
+ rx_pause = 1;
+ } else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+ tx_pause = 1;
+ } else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+ rx_pause = 1;
}
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_RX_PAUSE))
- lc->requested_fc |= PAUSE_RX;
- else
- lc->requested_fc &= ~PAUSE_RX;
+ ret = t4_set_link_pause(pi, fc_conf->autoneg, tx_pause,
+ rx_pause, &new_caps);
+ if (ret != 0)
+ return ret;
- if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
- (fc_conf->mode & RTE_FC_TX_PAUSE))
- lc->requested_fc |= PAUSE_TX;
- else
- lc->requested_fc &= ~PAUSE_TX;
+ if (!fc_conf->autoneg) {
+ if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
+ new_caps |= FW_PORT_CAP32_FORCE_PAUSE;
+ } else {
+ new_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
+ }
+
+ if (new_caps != lc->admin_caps) {
+ ret = t4_link_l1cfg(pi, new_caps);
+ if (ret == 0)
+ lc->admin_caps = new_caps;
+ }
- return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
- &pi->link_cfg);
+ return ret;
}
const uint32_t *
static int cxgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct port_info *pi = dev->data->dev_private;
struct adapter *adapter = pi->adapter;
int err;
static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
- struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct port_info *pi = dev->data->dev_private;
struct adapter *adapter = pi->adapter;
u64 rss_hf = 0;
u64 flags = 0;
return 0;
}
+static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct port_info *pi = dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ u16 i, idx, shift, *rss;
+ int ret;
+
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return -ENOMEM;
+
+ if (!reta_size || reta_size > pi->rss_size)
+ return -EINVAL;
+
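+ /* Stage updates on a copy so a failed write leaves the active table intact */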
+ rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
+ if (!rss)
+ return -ENOMEM;
+
+ rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (!(reta_conf[idx].mask & (1ULL << shift)))
+ continue;
+
+ rss[i] = reta_conf[idx].reta[shift];
+ }
+
+ ret = cxgbe_write_rss(pi, rss);
+ if (!ret)
+ rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));
+
+ rte_free(rss);
+ return ret;
+}
+
+static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+ struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ struct port_info *pi = dev->data->dev_private;
+ struct adapter *adapter = pi->adapter;
+ u16 i, idx, shift;
+
+ if (!(adapter->flags & FULL_INIT_DONE))
+ return -ENOMEM;
+
+ if (!reta_size || reta_size > pi->rss_size)
+ return -EINVAL;
+
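+ /* Copy out only the entries selected by each group's mask */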
+ for (i = 0; i < reta_size; i++) {
+ idx = i / RTE_RETA_GROUP_SIZE;
+ shift = i % RTE_RETA_GROUP_SIZE;
+ if (!(reta_conf[idx].mask & (1ULL << shift)))
+ continue;
+
+ reta_conf[idx].reta[shift] = pi->rss[i];
+ }
+
+ return 0;
+}
+
static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
{
RTE_SET_USED(dev);
static int cxgbe_get_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *e)
{
- struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct port_info *pi = dev->data->dev_private;
struct adapter *adapter = pi->adapter;
u32 i, err = 0;
u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0);
static int cxgbe_set_eeprom(struct rte_eth_dev *dev,
struct rte_dev_eeprom_info *eeprom)
{
- struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct port_info *pi = dev->data->dev_private;
struct adapter *adapter = pi->adapter;
u8 *buf;
int err = 0;
static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
return t4_get_regs_len(adapter) / sizeof(uint32_t);
static int cxgbe_get_regs(struct rte_eth_dev *eth_dev,
struct rte_dev_reg_info *regs)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) |
int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
- struct port_info *pi = (struct port_info *)(dev->data->dev_private);
+ struct port_info *pi = dev->data->dev_private;
int ret;
ret = cxgbe_mpstcam_modify(pi, (int)pi->xact_addr_filt, (u8 *)addr);
if (ret < 0)
return ret;
pi->xact_addr_filt = ret;
return 0;
}
+static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
+ struct rte_eth_fec_capa *capa_arr)
+{
+ int num = 0;
+
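+ /* When capa_arr is NULL, only count the entries that would be filled */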
+ if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
+ if (capa_arr) {
+ capa_arr[num].speed = ETH_SPEED_NUM_100G;
+ capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+ }
+ num++;
+ }
+
+ if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
+ if (capa_arr) {
+ capa_arr[num].speed = ETH_SPEED_NUM_50G;
+ capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+ }
+ num++;
+ }
+
+ if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
+ if (capa_arr) {
+ capa_arr[num].speed = ETH_SPEED_NUM_25G;
+ capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
+ RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+ }
+ num++;
+ }
+
+ return num;
+}
+
+static int cxgbe_fec_get_capability(struct rte_eth_dev *dev,
+ struct rte_eth_fec_capa *speed_fec_capa,
+ unsigned int num)
+{
+ struct port_info *pi = dev->data->dev_private;
+ struct link_config *lc = &pi->link_cfg;
+ u8 num_entries;
+
+ if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
+ return -EOPNOTSUPP;
+
+ num_entries = cxgbe_fec_get_capa_speed_to_fec(lc, NULL);
+ if (!speed_fec_capa || num < num_entries)
+ return num_entries;
+
+ return cxgbe_fec_get_capa_speed_to_fec(lc, speed_fec_capa);
+}
+
+static int cxgbe_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
+{
+ struct port_info *pi = dev->data->dev_private;
+ struct link_config *lc = &pi->link_cfg;
+ u32 fec_caps = 0, caps = lc->link_caps;
+
+ if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
+ return -EOPNOTSUPP;
+
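+ /* Report the single FEC mode currently active on the link */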
+ if (caps & FW_PORT_CAP32_FEC_RS)
+ fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+ else if (caps & FW_PORT_CAP32_FEC_BASER_RS)
+ fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+ else
+ fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+
+ *fec_capa = fec_caps;
+ return 0;
+}
+
+static int cxgbe_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
+{
+ struct port_info *pi = dev->data->dev_private;
+ u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
+ struct link_config *lc = &pi->link_cfg;
+ u32 new_caps = lc->admin_caps;
+ int ret;
+
+ if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
+ return -EOPNOTSUPP;
+
+ if (!fec_capa)
+ return -EINVAL;
+
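+ /* AUTO lets the firmware choose; leave all explicit FEC requests cleared */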
+ if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
+ goto set_fec;
+
+ if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC))
+ fec_none = 1;
+
+ if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
+ fec_baser = 1;
+
+ if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
+ fec_rs = 1;
+
+set_fec:
+ ret = t4_set_link_fec(pi, fec_rs, fec_baser, fec_none, &new_caps);
+ if (ret != 0)
+ return ret;
+
+ if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
+ new_caps |= FW_PORT_CAP32_FORCE_FEC;
+ else
+ new_caps &= ~FW_PORT_CAP32_FORCE_FEC;
+
+ if (new_caps != lc->admin_caps) {
+ ret = t4_link_l1cfg(pi, new_caps);
+ if (ret == 0)
+ lc->admin_caps = new_caps;
+ }
+
+ return ret;
+}
+
static const struct eth_dev_ops cxgbe_eth_dev_ops = {
.dev_start = cxgbe_dev_start,
.dev_stop = cxgbe_dev_stop,
.rx_queue_start = cxgbe_dev_rx_queue_start,
.rx_queue_stop = cxgbe_dev_rx_queue_stop,
.rx_queue_release = cxgbe_dev_rx_queue_release,
- .filter_ctrl = cxgbe_dev_filter_ctrl,
+ .flow_ops_get = cxgbe_dev_flow_ops_get,
.stats_get = cxgbe_dev_stats_get,
.stats_reset = cxgbe_dev_stats_reset,
.flow_ctrl_get = cxgbe_flow_ctrl_get,
.rss_hash_update = cxgbe_dev_rss_hash_update,
.rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
.mac_addr_set = cxgbe_mac_addr_set,
+ .reta_update = cxgbe_dev_rss_reta_update,
+ .reta_query = cxgbe_dev_rss_reta_query,
+ .fec_get_capability = cxgbe_fec_get_capability,
+ .fec_get = cxgbe_fec_get,
+ .fec_set = cxgbe_fec_set,
};
/*
static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
{
struct rte_pci_device *pci_dev;
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = NULL;
char name[RTE_ETH_NAME_MAX_LEN];
int err = 0;
return 0;
}
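+ /* Let ethdev derive per-queue xstats from the basic queue counters */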
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
adapter = rte_zmalloc(name, sizeof(*adapter), 0);
if (!adapter)
adapter->eth_dev = eth_dev;
pi->adapter = adapter;
+ cxgbe_process_devargs(adapter);
+
err = cxgbe_probe(adapter);
if (err) {
dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
- struct adapter *adap = pi->adapter;
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ uint16_t port_id;
+ int err = 0;
/* Free up other ports and all resources */
- cxgbe_close(adap);
- return 0;
+ RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
+ err |= rte_eth_dev_close(port_id);
+
+ return err == 0 ? 0 : -EIO;
}
static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
- CXGBE_DEVARG_KEEP_OVLAN "=<0|1> "
- CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> ");
+ CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+ CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
+ CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
+ CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
+RTE_LOG_REGISTER(cxgbe_logtype, pmd.net.cxgbe, NOTICE);
+RTE_LOG_REGISTER(cxgbe_mbox_logtype, pmd.net.cxgbe.mbox, NOTICE);