X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcxgbe%2Fcxgbe_ethdev.c;h=a12a98157c875762e5bbfd1f2000d33401bf5767;hb=520e3f4888c508dad32da1d8c5486a7be9b0fbba;hp=3a373ec1daa0b2a1e38f9e5c9162eaeb8e656f13;hpb=63a97e588b1f7a9d9533153058276af6e9abd2be;p=dpdk.git

diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 3a373ec1da..a12a98157c 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -28,8 +28,8 @@
 #include <rte_eal.h>
 #include <rte_alarm.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
 #include <rte_malloc.h>
 #include <rte_random.h>
 #include <rte_dev.h>
@@ -65,20 +65,25 @@ uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	struct sge_eth_txq *txq = (struct sge_eth_txq *)tx_queue;
 	uint16_t pkts_sent, pkts_remain;
 	uint16_t total_sent = 0;
+	uint16_t idx = 0;
 	int ret = 0;
 
-	CXGBE_DEBUG_TX(adapter, "%s: txq = %p; tx_pkts = %p; nb_pkts = %d\n",
-		       __func__, txq, tx_pkts, nb_pkts);
-
 	t4_os_lock(&txq->txq_lock);
 	/* free up desc from already completed tx */
 	reclaim_completed_tx(&txq->q);
+	if (unlikely(!nb_pkts))
+		goto out_unlock;
+
+	rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
 	while (total_sent < nb_pkts) {
 		pkts_remain = nb_pkts - total_sent;
 
 		for (pkts_sent = 0; pkts_sent < pkts_remain; pkts_sent++) {
-			ret = t4_eth_xmit(txq, tx_pkts[total_sent + pkts_sent],
-					  nb_pkts);
+			idx = total_sent + pkts_sent;
+			if ((idx + 1) < nb_pkts)
+				rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[idx + 1],
+							       volatile void *));
+			ret = t4_eth_xmit(txq, tx_pkts[idx], nb_pkts);
 			if (ret < 0)
 				break;
 		}
@@ -89,6 +94,7 @@ uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		reclaim_completed_tx(&txq->q);
 	}
 
+out_unlock:
 	t4_os_unlock(&txq->txq_lock);
 	return total_sent;
 }
@@ -99,22 +105,17 @@ uint16_t cxgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)rx_queue;
 	unsigned int work_done;
 
-	CXGBE_DEBUG_RX(adapter, "%s: rxq->rspq.cntxt_id = %u; nb_pkts = %d\n",
-		       __func__, rxq->rspq.cntxt_id, nb_pkts);
-
 	if (cxgbe_poll(&rxq->rspq, rx_pkts, (unsigned int)nb_pkts, &work_done))
 		dev_err(adapter, "error in cxgbe poll\n");
 
-	CXGBE_DEBUG_RX(adapter, "%s: work_done = %u\n", __func__, work_done);
 	return work_done;
 }
 
-void cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
+int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
 			struct rte_eth_dev_info *device_info)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
-	int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;
 
 	static const struct rte_eth_desc_lim cxgbe_desc_lim = {
 		.nb_max = CXGBE_MAX_RING_DESC_SIZE,
@@ -124,8 +125,8 @@
 
 	device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
 	device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
-	device_info->max_rx_queues = max_queues;
-	device_info->max_tx_queues = max_queues;
+	device_info->max_rx_queues = adapter->sge.max_ethqsets;
+	device_info->max_tx_queues = adapter->sge.max_ethqsets;
 	device_info->max_mac_addrs = 1;
 	/* XXX: For now we support one MAC/port */
 	device_info->max_vfs = adapter->params.arch.vfcount;
@@ -144,59 +145,65 @@
 	device_info->rx_desc_lim = cxgbe_desc_lim;
 	device_info->tx_desc_lim = cxgbe_desc_lim;
 	cxgbe_get_speed_caps(pi, &device_info->speed_capa);
+
+	return 0;
 }
 
-void cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
 
-	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-		      1, -1, 1, -1, false);
+	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+			     1, -1, 1, -1, false);
 }
 
-void cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
 
-	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-		      0, -1, 1, -1, false);
+	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+			     0, -1, 1, -1, false);
 }
 
-void cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
 
 	/* TODO: address filters ?? */
-	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-		      -1, 1, 1, -1, false);
+	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+			     -1, 1, 1, -1, false);
 }
 
-void cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
 
 	/* TODO: address filters ?? */
-	t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
-		      -1, 0, 1, -1, false);
+	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, -1,
+			     -1, 0, 1, -1, false);
 }
 
 int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
 			  int wait_to_complete)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
+	unsigned int i, work_done, budget = 32;
+	struct link_config *lc = &pi->link_cfg;
 	struct adapter *adapter = pi->adapter;
-	struct sge *s = &adapter->sge;
 	struct rte_eth_link new_link = { 0 };
-	unsigned int i, work_done, budget = 32;
 	u8 old_link = pi->link_cfg.link_ok;
+	struct sge *s = &adapter->sge;
 
 	for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
+		if (!s->fw_evtq.desc)
+			break;
+
 		cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
 
 		/* Exit if link status changed or always forced up */
@@ -212,9 +219,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
 
 	new_link.link_status = cxgbe_force_linkup(adapter) ?
 			       ETH_LINK_UP : pi->link_cfg.link_ok;
-	new_link.link_autoneg = pi->link_cfg.autoneg;
+	new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
 	new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-	new_link.link_speed = pi->link_cfg.speed;
+	new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
 
 	return rte_eth_linkstatus_set(eth_dev, &new_link);
 }
@@ -230,6 +237,9 @@ int cxgbe_dev_set_link_up(struct rte_eth_dev *dev)
 	struct sge *s = &adapter->sge;
 	int ret;
 
+	if (!s->fw_evtq.desc)
+		return -ENOMEM;
+
 	/* Flush all link events */
 	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
 
@@ -256,6 +266,9 @@ int cxgbe_dev_set_link_down(struct rte_eth_dev *dev)
 	struct sge *s = &adapter->sge;
 	int ret;
 
+	if (!s->fw_evtq.desc)
+		return -ENOMEM;
+
 	/* Flush all link events */
 	cxgbe_poll(&s->fw_evtq, NULL, budget, &work_done);
 
@@ -279,14 +292,16 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 	int err;
 	uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 
-	cxgbe_dev_info_get(eth_dev, &dev_info);
+	err = cxgbe_dev_info_get(eth_dev, &dev_info);
+	if (err != 0)
+		return err;
 
 	/* Must accommodate at least RTE_ETHER_MIN_MTU */
 	if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
 		return -EINVAL;
 
 	/* set to jumbo mode if needed */
-	if (new_mtu > RTE_ETHER_MAX_LEN)
+	if (new_mtu > CXGBE_ETH_MAX_LEN)
 		eth_dev->data->dev_conf.rxmode.offloads |=
 			DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
@@ -304,23 +319,41 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 /*
  * Stop device.
  */
-void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
 {
-	struct port_info *pi = eth_dev->data->dev_private;
+	struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
+	u8 i;
 
 	CXGBE_FUNC_TRACE();
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	if (!(adapter->flags & FULL_INIT_DONE))
-		return;
+		return 0;
+
+	if (!pi->viid)
+		return 0;
 
 	cxgbe_down(pi);
+	t4_sge_eth_release_queues(pi);
+	t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
+	pi->viid = 0;
 
-	/*
-	 * We clear queues only if both tx and rx path of the port
-	 * have been disabled
+	/* Free up the adapter-wide resources only after all the ports
+	 * under this PF have been closed.
 	 */
-	t4_sge_eth_clear_queues(pi);
+	for_each_port(adapter, i) {
+		temp_pi = adap2pinfo(adapter, i);
+		if (temp_pi->viid)
+			return 0;
+	}
+
+	cxgbe_close(adapter);
+	rte_free(adapter);
+
+	return 0;
 }
 
 /* Start the device.
@@ -384,7 +417,7 @@ out:
 /*
  * Stop device: disable rx and tx functions to allow for reconfiguring.
  */
-void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
@@ -392,7 +425,7 @@ void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
 	CXGBE_FUNC_TRACE();
 
 	if (!(adapter->flags & FULL_INIT_DONE))
-		return;
+		return 0;
 
 	cxgbe_down(pi);
 
@@ -402,6 +435,8 @@ void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
 	 */
 	t4_sge_eth_clear_queues(pi);
 	eth_dev->data->scattered_rx = 0;
+
+	return 0;
 }
 
 int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
@@ -412,6 +447,10 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 
 	CXGBE_FUNC_TRACE();
 
+	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+		eth_dev->data->dev_conf.rxmode.offloads |=
+			DEV_RX_OFFLOAD_RSS_HASH;
+
 	if (!(adapter->flags & FW_QUEUE_BOUND)) {
 		err = cxgbe_setup_sge_fwevtq(adapter);
 		if (err)
@@ -469,13 +508,14 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
 	struct sge *s = &adapter->sge;
-	struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
-	int err = 0;
 	unsigned int temp_nb_desc;
+	struct sge_eth_txq *txq;
+	int err = 0;
 
+	txq = &s->ethtxq[pi->first_txqset + queue_idx];
 	dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
 		  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
-		  socket_id, pi->first_qset);
+		  socket_id, pi->first_txqset);
 
 	/* Free up the existing queue */
 	if (eth_dev->data->tx_queues[queue_idx]) {
@@ -530,17 +570,16 @@ void cxgbe_dev_tx_queue_release(void *q)
 
 int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
-	int ret;
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adap = pi->adapter;
-	struct sge_rspq *q;
+	struct sge_eth_rxq *rxq;
+	int ret;
 
 	dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
 		  __func__, pi->port_id, rx_queue_id);
 
-	q = eth_dev->data->rx_queues[rx_queue_id];
-
-	ret = t4_sge_eth_rxq_start(adap, q);
+	rxq = eth_dev->data->rx_queues[rx_queue_id];
+	ret = t4_sge_eth_rxq_start(adap, rxq);
 	if (ret == 0)
 		eth_dev->data->rx_queue_state[rx_queue_id] =
 						RTE_ETH_QUEUE_STATE_STARTED;
@@ -549,16 +588,16 @@ int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 
 int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
-	int ret;
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adap = pi->adapter;
-	struct sge_rspq *q;
+	struct sge_eth_rxq *rxq;
+	int ret;
 
 	dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
 		  __func__, pi->port_id, rx_queue_id);
 
-	q = eth_dev->data->rx_queues[rx_queue_id];
-	ret = t4_sge_eth_rxq_stop(adap, q);
+	rxq = eth_dev->data->rx_queues[rx_queue_id];
+	ret = t4_sge_eth_rxq_stop(adap, rxq);
 	if (ret == 0)
 		eth_dev->data->rx_queue_state[rx_queue_id] =
 						RTE_ETH_QUEUE_STATE_STOPPED;
@@ -571,21 +610,26 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 			     const struct rte_eth_rxconf *rx_conf __rte_unused,
 			     struct rte_mempool *mp)
 {
+	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
+	struct rte_eth_dev_info dev_info;
 	struct sge *s = &adapter->sge;
-	struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
-	int err = 0;
-	int msi_idx = 0;
 	unsigned int temp_nb_desc;
-	struct rte_eth_dev_info dev_info;
-	unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+	int err = 0, msi_idx = 0;
+	struct sge_eth_rxq *rxq;
 
+	rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
 	dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
 		  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
 		  socket_id, mp);
 
-	cxgbe_dev_info_get(eth_dev, &dev_info);
+	err = cxgbe_dev_info_get(eth_dev, &dev_info);
+	if (err != 0) {
+		dev_err(adap, "%s: error during getting ethernet device info",
+			__func__);
+		return err;
+	}
 
 	/* Must accommodate at least RTE_ETHER_MIN_MTU */
 	if ((pkt_len < dev_info.min_rx_bufsize) ||
@@ -626,7 +670,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	rxq->fl.size = temp_nb_desc;
 
 	/* Set to jumbo mode if necessary */
-	if (pkt_len > RTE_ETHER_MAX_LEN)
+	if (pkt_len > CXGBE_ETH_MAX_LEN)
 		eth_dev->data->dev_conf.rxmode.offloads |=
 			DEV_RX_OFFLOAD_JUMBO_FRAME;
 	else
@@ -648,11 +692,10 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 void cxgbe_dev_rx_queue_release(void *q)
 {
 	struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
-	struct sge_rspq *rq = &rxq->rspq;
 
-	if (rq) {
+	if (rxq) {
 		struct port_info *pi = (struct port_info *)
-				       (rq->eth_dev->data->dev_private);
+				       (rxq->rspq.eth_dev->data->dev_private);
 		struct adapter *adap = pi->adapter;
 
 		dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
@@ -692,7 +735,7 @@ static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
 
 	for (i = 0; i < pi->n_rx_qsets; i++) {
 		struct sge_eth_rxq *rxq =
-			&s->ethrxq[pi->first_qset + i];
+			&s->ethrxq[pi->first_rxqset + i];
 
 		eth_stats->q_ipackets[i] = rxq->stats.pkts;
 		eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
@@ -702,7 +745,7 @@ static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
 
 	for (i = 0; i < pi->n_tx_qsets; i++) {
 		struct sge_eth_txq *txq =
-			&s->ethtxq[pi->first_qset + i];
+			&s->ethtxq[pi->first_txqset + i];
 
 		eth_stats->q_opackets[i] = txq->stats.pkts;
 		eth_stats->q_obytes[i] = txq->stats.tx_bytes;
@@ -713,7 +756,7 @@ static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
 /*
  * Reset port statistics.
  */
-static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
+static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct adapter *adapter = pi->adapter;
@@ -723,19 +766,21 @@ static void cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
 	cxgbe_stats_reset(pi);
 	for (i = 0; i < pi->n_rx_qsets; i++) {
 		struct sge_eth_rxq *rxq =
-			&s->ethrxq[pi->first_qset + i];
+			&s->ethrxq[pi->first_rxqset + i];
 
 		rxq->stats.pkts = 0;
 		rxq->stats.rx_bytes = 0;
 	}
 	for (i = 0; i < pi->n_tx_qsets; i++) {
 		struct sge_eth_txq *txq =
-			&s->ethtxq[pi->first_qset + i];
+			&s->ethtxq[pi->first_txqset + i];
 
 		txq->stats.pkts = 0;
 		txq->stats.tx_bytes = 0;
 		txq->stats.mapping_err = 0;
 	}
+
+	return 0;
 }
 
 static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
@@ -743,11 +788,17 @@
 {
 	struct port_info *pi = eth_dev->data->dev_private;
 	struct link_config *lc = &pi->link_cfg;
-	int rx_pause, tx_pause;
+	u8 rx_pause = 0, tx_pause = 0;
+	u32 caps = lc->link_caps;
+
+	if (caps & FW_PORT_CAP32_ANEG)
+		fc_conf->autoneg = 1;
+
+	if (caps & FW_PORT_CAP32_FC_TX)
+		tx_pause = 1;
 
-	fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
-	rx_pause = lc->fc & PAUSE_RX;
-	tx_pause = lc->fc & PAUSE_TX;
+	if (caps & FW_PORT_CAP32_FC_RX)
+		rx_pause = 1;
 
 	if (rx_pause && tx_pause)
 		fc_conf->mode = RTE_FC_FULL;
@@ -764,30 +815,39 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
 			   struct rte_eth_fc_conf *fc_conf)
 {
 	struct port_info *pi = eth_dev->data->dev_private;
-	struct adapter *adapter = pi->adapter;
 	struct link_config *lc = &pi->link_cfg;
+	u32 new_caps = lc->admin_caps;
+	u8 tx_pause = 0, rx_pause = 0;
+	int ret;
 
-	if (lc->pcaps & FW_PORT_CAP32_ANEG) {
-		if (fc_conf->autoneg)
-			lc->requested_fc |= PAUSE_AUTONEG;
-		else
-			lc->requested_fc &= ~PAUSE_AUTONEG;
+	if (fc_conf->mode == RTE_FC_FULL) {
+		tx_pause = 1;
+		rx_pause = 1;
+	} else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+		tx_pause = 1;
+	} else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+		rx_pause = 1;
 	}
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_RX_PAUSE))
-		lc->requested_fc |= PAUSE_RX;
-	else
-		lc->requested_fc &= ~PAUSE_RX;
+	ret = t4_set_link_pause(pi, fc_conf->autoneg, tx_pause,
+				rx_pause, &new_caps);
+	if (ret != 0)
+		return ret;
 
-	if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-	    (fc_conf->mode & RTE_FC_TX_PAUSE))
-		lc->requested_fc |= PAUSE_TX;
-	else
-		lc->requested_fc &= ~PAUSE_TX;
+	if (!fc_conf->autoneg) {
+		if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
+			new_caps |= FW_PORT_CAP32_FORCE_PAUSE;
+	} else {
+		new_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
+	}
 
-	return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
-			     &pi->link_cfg);
+	if (new_caps != lc->admin_caps) {
+		ret = t4_link_l1cfg(pi, new_caps);
+		if (ret == 0)
+			lc->admin_caps = new_caps;
+	}
+
+	return ret;
 }
 
 const uint32_t *
@@ -886,6 +946,69 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
 	return 0;
 }
 
+static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+				     struct rte_eth_rss_reta_entry64 *reta_conf,
+				     uint16_t reta_size)
+{
+	struct port_info *pi = dev->data->dev_private;
+	struct adapter *adapter = pi->adapter;
+	u16 i, idx, shift, *rss;
+	int ret;
+
+	if (!(adapter->flags & FULL_INIT_DONE))
+		return -ENOMEM;
+
+	if (!reta_size || reta_size > pi->rss_size)
+		return -EINVAL;
+
+	rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
+	if (!rss)
+		return -ENOMEM;
+
+	rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_RETA_GROUP_SIZE;
+		shift = i % RTE_RETA_GROUP_SIZE;
+		if (!(reta_conf[idx].mask & (1ULL << shift)))
+			continue;
+
+		rss[i] = reta_conf[idx].reta[shift];
+	}
+
+	ret = cxgbe_write_rss(pi, rss);
+	if (!ret)
+		rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));
+
+	rte_free(rss);
+	return ret;
+}
+
+static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+				    struct rte_eth_rss_reta_entry64 *reta_conf,
+				    uint16_t reta_size)
+{
+	struct port_info *pi = dev->data->dev_private;
+	struct adapter *adapter = pi->adapter;
+	u16 i, idx, shift;
+
+	if (!(adapter->flags & FULL_INIT_DONE))
+		return -ENOMEM;
+
+	if (!reta_size || reta_size > pi->rss_size)
+		return -EINVAL;
+
+	for (i = 0; i < reta_size; i++) {
+		idx = i / RTE_RETA_GROUP_SIZE;
+		shift = i % RTE_RETA_GROUP_SIZE;
+		if (!(reta_conf[idx].mask & (1ULL << shift)))
+			continue;
+
+		reta_conf[idx].reta[shift] = pi->rss[i];
+	}
+
+	return 0;
+}
+
 static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
 {
 	RTE_SET_USED(dev);
@@ -1070,6 +1193,125 @@ int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
 	return 0;
 }
 
+static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
+					   struct rte_eth_fec_capa *capa_arr)
+{
+	int num = 0;
+
+	if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
+		if (capa_arr) {
+			capa_arr[num].speed = ETH_SPEED_NUM_100G;
+			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+		}
+		num++;
+	}
+
+	if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
+		if (capa_arr) {
+			capa_arr[num].speed = ETH_SPEED_NUM_50G;
+			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+		}
+		num++;
+	}
+
+	if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
+		if (capa_arr) {
+			capa_arr[num].speed = ETH_SPEED_NUM_25G;
+			capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+					     RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
+					     RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+		}
+		num++;
+	}
+
+	return num;
+}
+
+static int cxgbe_fec_get_capability(struct rte_eth_dev *dev,
+				    struct rte_eth_fec_capa *speed_fec_capa,
+				    unsigned int num)
+{
+	struct port_info *pi = dev->data->dev_private;
+	struct link_config *lc = &pi->link_cfg;
+	u8 num_entries;
+
+	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
+		return -EOPNOTSUPP;
+
+	num_entries = cxgbe_fec_get_capa_speed_to_fec(lc, NULL);
+	if (!speed_fec_capa || num < num_entries)
+		return num_entries;
+
+	return cxgbe_fec_get_capa_speed_to_fec(lc, speed_fec_capa);
+}
+
+static int cxgbe_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
+{
+	struct port_info *pi = dev->data->dev_private;
+	struct link_config *lc = &pi->link_cfg;
+	u32 fec_caps = 0, caps = lc->link_caps;
+
+	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
+		return -EOPNOTSUPP;
+
+	if (caps & FW_PORT_CAP32_FEC_RS)
+		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+	else if (caps & FW_PORT_CAP32_FEC_BASER_RS)
+		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+	else
+		fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+
+	*fec_capa = fec_caps;
+	return 0;
+}
+
+static int cxgbe_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
+{
+	struct port_info *pi = dev->data->dev_private;
+	u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
+	struct link_config *lc = &pi->link_cfg;
+	u32 new_caps = lc->admin_caps;
+	int ret;
+
+	if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
+		return -EOPNOTSUPP;
+
+	if (!fec_capa)
+		return -EINVAL;
+
+	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
+		goto set_fec;
+
+	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC))
+		fec_none = 1;
+
+	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
+		fec_baser = 1;
+
+	if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
+		fec_rs = 1;
+
+set_fec:
+	ret = t4_set_link_fec(pi, fec_rs, fec_baser, fec_none, &new_caps);
+	if (ret != 0)
+		return ret;
+
+	if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
+		new_caps |= FW_PORT_CAP32_FORCE_FEC;
+	else
+		new_caps &= ~FW_PORT_CAP32_FORCE_FEC;
+
+	if (new_caps != lc->admin_caps) {
+		ret = t4_link_l1cfg(pi, new_caps);
+		if (ret == 0)
+			lc->admin_caps = new_caps;
+	}
+
+	return ret;
+}
+
 static const struct eth_dev_ops cxgbe_eth_dev_ops = {
 	.dev_start = cxgbe_dev_start,
 	.dev_stop = cxgbe_dev_stop,
@@ -1093,7 +1335,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
 	.rx_queue_start = cxgbe_dev_rx_queue_start,
 	.rx_queue_stop = cxgbe_dev_rx_queue_stop,
 	.rx_queue_release = cxgbe_dev_rx_queue_release,
-	.filter_ctrl = cxgbe_dev_filter_ctrl,
+	.flow_ops_get = cxgbe_dev_flow_ops_get,
 	.stats_get = cxgbe_dev_stats_get,
 	.stats_reset = cxgbe_dev_stats_reset,
 	.flow_ctrl_get = cxgbe_flow_ctrl_get,
@@ -1105,6 +1347,11 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
 	.rss_hash_update = cxgbe_dev_rss_hash_update,
 	.rss_hash_conf_get = cxgbe_dev_rss_hash_conf_get,
 	.mac_addr_set = cxgbe_mac_addr_set,
+	.reta_update = cxgbe_dev_rss_reta_update,
+	.reta_query = cxgbe_dev_rss_reta_query,
+	.fec_get_capability = cxgbe_fec_get_capability,
+	.fec_get = cxgbe_fec_get,
+	.fec_set = cxgbe_fec_set,
 };
 
 /*
@@ -1153,6 +1400,8 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
 		return 0;
 	}
 
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
 	snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
 	adapter = rte_zmalloc(name, sizeof(*adapter), 0);
 	if (!adapter)
@@ -1169,6 +1418,8 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
 	adapter->eth_dev = eth_dev;
 	pi->adapter = adapter;
 
+	cxgbe_process_devargs(adapter);
+
 	err = cxgbe_probe(adapter);
 	if (err) {
 		dev_err(adapter, "%s: cxgbe probe failed with err %d\n",
@@ -1185,12 +1436,15 @@ out_free_adapter:
 
 static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 {
-	struct port_info *pi = eth_dev->data->dev_private;
-	struct adapter *adap = pi->adapter;
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+	uint16_t port_id;
+	int err = 0;
 
 	/* Free up other ports and all resources */
-	cxgbe_close(adap);
-	return 0;
+	RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
+		err |= rte_eth_dev_close(port_id);
+
+	return err == 0 ? 0 : -EIO;
 }
 
 static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
@@ -1216,5 +1470,9 @@ RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
 RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
-			      CXGBE_DEVARG_KEEP_OVLAN "=<0|1> "
-			      CXGBE_DEVARG_FORCE_LINK_UP "=<0|1> ");
+			      CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+			      CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
+			      CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
+			      CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
+RTE_LOG_REGISTER_DEFAULT(cxgbe_logtype, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(cxgbe_mbox_logtype, mbox, NOTICE);