net/mlx5/linux: fix firmware version
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 2a2875f..a12a981 100644
--- a/drivers/net/cxgbe/cxgbe_ethdev.c
+++ b/drivers/net/cxgbe/cxgbe_ethdev.c
@@ -28,8 +28,8 @@
 #include <rte_eal.h>
 #include <rte_alarm.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
 #include <rte_malloc.h>
 #include <rte_random.h>
 #include <rte_dev.h>
@@ -38,9 +38,6 @@
 #include "cxgbe_pfvf.h"
 #include "cxgbe_flow.h"
 
-int cxgbe_logtype;
-int cxgbe_mbox_logtype;
-
 /*
  * Macros needed to support the PCI Device ID Table ...
  */
@@ -74,6 +71,9 @@ uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        t4_os_lock(&txq->txq_lock);
        /* free up desc from already completed tx */
        reclaim_completed_tx(&txq->q);
+       if (unlikely(!nb_pkts))
+               goto out_unlock;
+
        rte_prefetch0(rte_pktmbuf_mtod(tx_pkts[0], volatile void *));
        while (total_sent < nb_pkts) {
                pkts_remain = nb_pkts - total_sent;
@@ -94,6 +94,7 @@ uint16_t cxgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                reclaim_completed_tx(&txq->q);
        }
 
+out_unlock:
        t4_os_unlock(&txq->txq_lock);
        return total_sent;
 }
@@ -115,7 +116,6 @@ int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
 {
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
-       int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;
 
        static const struct rte_eth_desc_lim cxgbe_desc_lim = {
                .nb_max = CXGBE_MAX_RING_DESC_SIZE,
@@ -125,8 +125,8 @@ int cxgbe_dev_info_get(struct rte_eth_dev *eth_dev,
 
        device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
        device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
-       device_info->max_rx_queues = max_queues;
-       device_info->max_tx_queues = max_queues;
+       device_info->max_rx_queues = adapter->sge.max_ethqsets;
+       device_info->max_tx_queues = adapter->sge.max_ethqsets;
        device_info->max_mac_addrs = 1;
        /* XXX: For now we support one MAC/port */
        device_info->max_vfs = adapter->params.arch.vfcount;
@@ -193,11 +193,12 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
                          int wait_to_complete)
 {
        struct port_info *pi = eth_dev->data->dev_private;
+       unsigned int i, work_done, budget = 32;
+       struct link_config *lc = &pi->link_cfg;
        struct adapter *adapter = pi->adapter;
-       struct sge *s = &adapter->sge;
        struct rte_eth_link new_link = { 0 };
-       unsigned int i, work_done, budget = 32;
        u8 old_link = pi->link_cfg.link_ok;
+       struct sge *s = &adapter->sge;
 
        for (i = 0; i < CXGBE_LINK_STATUS_POLL_CNT; i++) {
                if (!s->fw_evtq.desc)
@@ -218,9 +219,9 @@ int cxgbe_dev_link_update(struct rte_eth_dev *eth_dev,
 
        new_link.link_status = cxgbe_force_linkup(adapter) ?
                               ETH_LINK_UP : pi->link_cfg.link_ok;
-       new_link.link_autoneg = pi->link_cfg.autoneg;
+       new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
        new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
-       new_link.link_speed = pi->link_cfg.speed;
+       new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
 
        return rte_eth_linkstatus_set(eth_dev, &new_link);
 }
@@ -300,7 +301,7 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
                return -EINVAL;
 
        /* set to jumbo mode if needed */
-       if (new_mtu > RTE_ETHER_MAX_LEN)
+       if (new_mtu > CXGBE_ETH_MAX_LEN)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
@@ -318,23 +319,41 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 /*
  * Stop device.
  */
-void cxgbe_dev_close(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_close(struct rte_eth_dev *eth_dev)
 {
-       struct port_info *pi = eth_dev->data->dev_private;
+       struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
+       u8 i;
 
        CXGBE_FUNC_TRACE();
 
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
+
        if (!(adapter->flags & FULL_INIT_DONE))
-               return;
+               return 0;
+
+       if (!pi->viid)
+               return 0;
 
        cxgbe_down(pi);
+       t4_sge_eth_release_queues(pi);
+       t4_free_vi(adapter, adapter->mbox, adapter->pf, 0, pi->viid);
+       pi->viid = 0;
 
-       /*
-        *  We clear queues only if both tx and rx path of the port
-        *  have been disabled
+       /* Free up the adapter-wide resources only after all the ports
+        * under this PF have been closed.
         */
-       t4_sge_eth_clear_queues(pi);
+       for_each_port(adapter, i) {
+               temp_pi = adap2pinfo(adapter, i);
+               if (temp_pi->viid)
+                       return 0;
+       }
+
+       cxgbe_close(adapter);
+       rte_free(adapter);
+
+       return 0;
 }
 
 /* Start the device.
@@ -398,7 +417,7 @@ out:
 /*
  * Stop device: disable rx and tx functions to allow for reconfiguring.
  */
-void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
+int cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
 {
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
@@ -406,7 +425,7 @@ void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
        CXGBE_FUNC_TRACE();
 
        if (!(adapter->flags & FULL_INIT_DONE))
-               return;
+               return 0;
 
        cxgbe_down(pi);
 
@@ -416,6 +435,8 @@ void cxgbe_dev_stop(struct rte_eth_dev *eth_dev)
         */
        t4_sge_eth_clear_queues(pi);
        eth_dev->data->scattered_rx = 0;
+
+       return 0;
 }
 
 int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
@@ -426,6 +447,10 @@ int cxgbe_dev_configure(struct rte_eth_dev *eth_dev)
 
        CXGBE_FUNC_TRACE();
 
+       if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+               eth_dev->data->dev_conf.rxmode.offloads |=
+                       DEV_RX_OFFLOAD_RSS_HASH;
+
        if (!(adapter->flags & FW_QUEUE_BOUND)) {
                err = cxgbe_setup_sge_fwevtq(adapter);
                if (err)
@@ -483,13 +508,14 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct sge *s = &adapter->sge;
-       struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
-       int err = 0;
        unsigned int temp_nb_desc;
+       struct sge_eth_txq *txq;
+       int err = 0;
 
+       txq = &s->ethtxq[pi->first_txqset + queue_idx];
        dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
                  __func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
-                 socket_id, pi->first_qset);
+                 socket_id, pi->first_txqset);
 
        /*  Free up the existing queue  */
        if (eth_dev->data->tx_queues[queue_idx]) {
@@ -544,17 +570,16 @@ void cxgbe_dev_tx_queue_release(void *q)
 
 int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
-       int ret;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;
-       struct sge_rspq *q;
+       struct sge_eth_rxq *rxq;
+       int ret;
 
        dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                  __func__, pi->port_id, rx_queue_id);
 
-       q = eth_dev->data->rx_queues[rx_queue_id];
-
-       ret = t4_sge_eth_rxq_start(adap, q);
+       rxq = eth_dev->data->rx_queues[rx_queue_id];
+       ret = t4_sge_eth_rxq_start(adap, rxq);
        if (ret == 0)
                eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
 
@@ -563,16 +588,16 @@ int cxgbe_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 
 int cxgbe_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
-       int ret;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adap = pi->adapter;
-       struct sge_rspq *q;
+       struct sge_eth_rxq *rxq;
+       int ret;
 
        dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
                  __func__, pi->port_id, rx_queue_id);
 
-       q = eth_dev->data->rx_queues[rx_queue_id];
-       ret = t4_sge_eth_rxq_stop(adap, q);
+       rxq = eth_dev->data->rx_queues[rx_queue_id];
+       ret = t4_sge_eth_rxq_stop(adap, rxq);
        if (ret == 0)
                eth_dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
 
@@ -585,16 +610,16 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                             const struct rte_eth_rxconf *rx_conf __rte_unused,
                             struct rte_mempool *mp)
 {
+       unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
+       struct rte_eth_dev_info dev_info;
        struct sge *s = &adapter->sge;
-       struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
-       int err = 0;
-       int msi_idx = 0;
        unsigned int temp_nb_desc;
-       struct rte_eth_dev_info dev_info;
-       unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       int err = 0, msi_idx = 0;
+       struct sge_eth_rxq *rxq;
 
+       rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
        dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
                  __func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
                  socket_id, mp);
@@ -645,7 +670,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                rxq->fl.size = temp_nb_desc;
 
        /* Set to jumbo mode if necessary */
-       if (pkt_len > RTE_ETHER_MAX_LEN)
+       if (pkt_len > CXGBE_ETH_MAX_LEN)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
@@ -667,11 +692,10 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 void cxgbe_dev_rx_queue_release(void *q)
 {
        struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
-       struct sge_rspq *rq = &rxq->rspq;
 
-       if (rq) {
+       if (rxq) {
                struct port_info *pi = (struct port_info *)
-                                      (rq->eth_dev->data->dev_private);
+                                      (rxq->rspq.eth_dev->data->dev_private);
                struct adapter *adap = pi->adapter;
 
                dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
@@ -711,7 +735,7 @@ static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
 
        for (i = 0; i < pi->n_rx_qsets; i++) {
                struct sge_eth_rxq *rxq =
-                       &s->ethrxq[pi->first_qset + i];
+                       &s->ethrxq[pi->first_rxqset + i];
 
                eth_stats->q_ipackets[i] = rxq->stats.pkts;
                eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
@@ -721,7 +745,7 @@ static int cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev,
 
        for (i = 0; i < pi->n_tx_qsets; i++) {
                struct sge_eth_txq *txq =
-                       &s->ethtxq[pi->first_qset + i];
+                       &s->ethtxq[pi->first_txqset + i];
 
                eth_stats->q_opackets[i] = txq->stats.pkts;
                eth_stats->q_obytes[i] = txq->stats.tx_bytes;
@@ -742,14 +766,14 @@ static int cxgbe_dev_stats_reset(struct rte_eth_dev *eth_dev)
        cxgbe_stats_reset(pi);
        for (i = 0; i < pi->n_rx_qsets; i++) {
                struct sge_eth_rxq *rxq =
-                       &s->ethrxq[pi->first_qset + i];
+                       &s->ethrxq[pi->first_rxqset + i];
 
                rxq->stats.pkts = 0;
                rxq->stats.rx_bytes = 0;
        }
        for (i = 0; i < pi->n_tx_qsets; i++) {
                struct sge_eth_txq *txq =
-                       &s->ethtxq[pi->first_qset + i];
+                       &s->ethtxq[pi->first_txqset + i];
 
                txq->stats.pkts = 0;
                txq->stats.tx_bytes = 0;
@@ -764,11 +788,17 @@ static int cxgbe_flow_ctrl_get(struct rte_eth_dev *eth_dev,
 {
        struct port_info *pi = eth_dev->data->dev_private;
        struct link_config *lc = &pi->link_cfg;
-       int rx_pause, tx_pause;
+       u8 rx_pause = 0, tx_pause = 0;
+       u32 caps = lc->link_caps;
+
+       if (caps & FW_PORT_CAP32_ANEG)
+               fc_conf->autoneg = 1;
 
-       fc_conf->autoneg = lc->fc & PAUSE_AUTONEG;
-       rx_pause = lc->fc & PAUSE_RX;
-       tx_pause = lc->fc & PAUSE_TX;
+       if (caps & FW_PORT_CAP32_FC_TX)
+               tx_pause = 1;
+
+       if (caps & FW_PORT_CAP32_FC_RX)
+               rx_pause = 1;
 
        if (rx_pause && tx_pause)
                fc_conf->mode = RTE_FC_FULL;
@@ -785,30 +815,39 @@ static int cxgbe_flow_ctrl_set(struct rte_eth_dev *eth_dev,
                               struct rte_eth_fc_conf *fc_conf)
 {
        struct port_info *pi = eth_dev->data->dev_private;
-       struct adapter *adapter = pi->adapter;
        struct link_config *lc = &pi->link_cfg;
+       u32 new_caps = lc->admin_caps;
+       u8 tx_pause = 0, rx_pause = 0;
+       int ret;
 
-       if (lc->pcaps & FW_PORT_CAP32_ANEG) {
-               if (fc_conf->autoneg)
-                       lc->requested_fc |= PAUSE_AUTONEG;
-               else
-                       lc->requested_fc &= ~PAUSE_AUTONEG;
+       if (fc_conf->mode == RTE_FC_FULL) {
+               tx_pause = 1;
+               rx_pause = 1;
+       } else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+               tx_pause = 1;
+       } else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+               rx_pause = 1;
        }
 
-       if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-           (fc_conf->mode & RTE_FC_RX_PAUSE))
-               lc->requested_fc |= PAUSE_RX;
-       else
-               lc->requested_fc &= ~PAUSE_RX;
+       ret = t4_set_link_pause(pi, fc_conf->autoneg, tx_pause,
+                               rx_pause, &new_caps);
+       if (ret != 0)
+               return ret;
 
-       if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) ||
-           (fc_conf->mode & RTE_FC_TX_PAUSE))
-               lc->requested_fc |= PAUSE_TX;
-       else
-               lc->requested_fc &= ~PAUSE_TX;
+       if (!fc_conf->autoneg) {
+               if (lc->pcaps & FW_PORT_CAP32_FORCE_PAUSE)
+                       new_caps |= FW_PORT_CAP32_FORCE_PAUSE;
+       } else {
+               new_caps &= ~FW_PORT_CAP32_FORCE_PAUSE;
+       }
 
-       return t4_link_l1cfg(adapter, adapter->mbox, pi->tx_chan,
-                            &pi->link_cfg);
+       if (new_caps != lc->admin_caps) {
+               ret = t4_link_l1cfg(pi, new_caps);
+               if (ret == 0)
+                       lc->admin_caps = new_caps;
+       }
+
+       return ret;
 }
 
 const uint32_t *
@@ -907,6 +946,69 @@ static int cxgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
        return 0;
 }
 
+static int cxgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+                                    struct rte_eth_rss_reta_entry64 *reta_conf,
+                                    uint16_t reta_size)
+{
+       struct port_info *pi = dev->data->dev_private;
+       struct adapter *adapter = pi->adapter;
+       u16 i, idx, shift, *rss;
+       int ret;
+
+       if (!(adapter->flags & FULL_INIT_DONE))
+               return -ENOMEM;
+
+       if (!reta_size || reta_size > pi->rss_size)
+               return -EINVAL;
+
+       rss = rte_calloc(NULL, pi->rss_size, sizeof(u16), 0);
+       if (!rss)
+               return -ENOMEM;
+
+       rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
+       for (i = 0; i < reta_size; i++) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               if (!(reta_conf[idx].mask & (1ULL << shift)))
+                       continue;
+
+               rss[i] = reta_conf[idx].reta[shift];
+       }
+
+       ret = cxgbe_write_rss(pi, rss);
+       if (!ret)
+               rte_memcpy(pi->rss, rss, pi->rss_size * sizeof(u16));
+
+       rte_free(rss);
+       return ret;
+}
+
+static int cxgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+                                   struct rte_eth_rss_reta_entry64 *reta_conf,
+                                   uint16_t reta_size)
+{
+       struct port_info *pi = dev->data->dev_private;
+       struct adapter *adapter = pi->adapter;
+       u16 i, idx, shift;
+
+       if (!(adapter->flags & FULL_INIT_DONE))
+               return -ENOMEM;
+
+       if (!reta_size || reta_size > pi->rss_size)
+               return -EINVAL;
+
+       for (i = 0; i < reta_size; i++) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               if (!(reta_conf[idx].mask & (1ULL << shift)))
+                       continue;
+
+               reta_conf[idx].reta[shift] = pi->rss[i];
+       }
+
+       return 0;
+}
+
 static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev)
 {
        RTE_SET_USED(dev);
@@ -1091,6 +1193,125 @@ int cxgbe_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
        return 0;
 }
 
+static int cxgbe_fec_get_capa_speed_to_fec(struct link_config *lc,
+                                          struct rte_eth_fec_capa *capa_arr)
+{
+       int num = 0;
+
+       if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
+               if (capa_arr) {
+                       capa_arr[num].speed = ETH_SPEED_NUM_100G;
+                       capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+                                            RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+               }
+               num++;
+       }
+
+       if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
+               if (capa_arr) {
+                       capa_arr[num].speed = ETH_SPEED_NUM_50G;
+                       capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+                                            RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+               }
+               num++;
+       }
+
+       if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
+               if (capa_arr) {
+                       capa_arr[num].speed = ETH_SPEED_NUM_25G;
+                       capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
+                                            RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
+                                            RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+               }
+               num++;
+       }
+
+       return num;
+}
+
+static int cxgbe_fec_get_capability(struct rte_eth_dev *dev,
+                                   struct rte_eth_fec_capa *speed_fec_capa,
+                                   unsigned int num)
+{
+       struct port_info *pi = dev->data->dev_private;
+       struct link_config *lc = &pi->link_cfg;
+       u8 num_entries;
+
+       if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
+               return -EOPNOTSUPP;
+
+       num_entries = cxgbe_fec_get_capa_speed_to_fec(lc, NULL);
+       if (!speed_fec_capa || num < num_entries)
+               return num_entries;
+
+       return cxgbe_fec_get_capa_speed_to_fec(lc, speed_fec_capa);
+}
+
+static int cxgbe_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
+{
+       struct port_info *pi = dev->data->dev_private;
+       struct link_config *lc = &pi->link_cfg;
+       u32 fec_caps = 0, caps = lc->link_caps;
+
+       if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
+               return -EOPNOTSUPP;
+
+       if (caps & FW_PORT_CAP32_FEC_RS)
+               fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
+       else if (caps & FW_PORT_CAP32_FEC_BASER_RS)
+               fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
+       else
+               fec_caps = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
+
+       *fec_capa = fec_caps;
+       return 0;
+}
+
+static int cxgbe_fec_set(struct rte_eth_dev *dev, uint32_t fec_capa)
+{
+       struct port_info *pi = dev->data->dev_private;
+       u8 fec_rs = 0, fec_baser = 0, fec_none = 0;
+       struct link_config *lc = &pi->link_cfg;
+       u32 new_caps = lc->admin_caps;
+       int ret;
+
+       if (!(lc->pcaps & V_FW_PORT_CAP32_FEC(M_FW_PORT_CAP32_FEC)))
+               return -EOPNOTSUPP;
+
+       if (!fec_capa)
+               return -EINVAL;
+
+       if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(AUTO))
+               goto set_fec;
+
+       if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC))
+               fec_none = 1;
+
+       if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(BASER))
+               fec_baser = 1;
+
+       if (fec_capa & RTE_ETH_FEC_MODE_CAPA_MASK(RS))
+               fec_rs = 1;
+
+set_fec:
+       ret = t4_set_link_fec(pi, fec_rs, fec_baser, fec_none, &new_caps);
+       if (ret != 0)
+               return ret;
+
+       if (lc->pcaps & FW_PORT_CAP32_FORCE_FEC)
+               new_caps |= FW_PORT_CAP32_FORCE_FEC;
+       else
+               new_caps &= ~FW_PORT_CAP32_FORCE_FEC;
+
+       if (new_caps != lc->admin_caps) {
+               ret = t4_link_l1cfg(pi, new_caps);
+               if (ret == 0)
+                       lc->admin_caps = new_caps;
+       }
+
+       return ret;
+}
+
 static const struct eth_dev_ops cxgbe_eth_dev_ops = {
        .dev_start              = cxgbe_dev_start,
        .dev_stop               = cxgbe_dev_stop,
@@ -1114,7 +1335,7 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
        .rx_queue_start         = cxgbe_dev_rx_queue_start,
        .rx_queue_stop          = cxgbe_dev_rx_queue_stop,
        .rx_queue_release       = cxgbe_dev_rx_queue_release,
-       .filter_ctrl            = cxgbe_dev_filter_ctrl,
+       .flow_ops_get           = cxgbe_dev_flow_ops_get,
        .stats_get              = cxgbe_dev_stats_get,
        .stats_reset            = cxgbe_dev_stats_reset,
        .flow_ctrl_get          = cxgbe_flow_ctrl_get,
@@ -1126,6 +1347,11 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = {
        .rss_hash_update        = cxgbe_dev_rss_hash_update,
        .rss_hash_conf_get      = cxgbe_dev_rss_hash_conf_get,
        .mac_addr_set           = cxgbe_mac_addr_set,
+       .reta_update            = cxgbe_dev_rss_reta_update,
+       .reta_query             = cxgbe_dev_rss_reta_query,
+       .fec_get_capability     = cxgbe_fec_get_capability,
+       .fec_get                = cxgbe_fec_get,
+       .fec_set                = cxgbe_fec_set,
 };
 
 /*
@@ -1174,6 +1400,8 @@ static int eth_cxgbe_dev_init(struct rte_eth_dev *eth_dev)
                return 0;
        }
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        snprintf(name, sizeof(name), "cxgbeadapter%d", eth_dev->data->port_id);
        adapter = rte_zmalloc(name, sizeof(*adapter), 0);
        if (!adapter)
@@ -1208,12 +1436,15 @@ out_free_adapter:
 
 static int eth_cxgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 {
-       struct port_info *pi = eth_dev->data->dev_private;
-       struct adapter *adap = pi->adapter;
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+       uint16_t port_id;
+       int err = 0;
 
        /* Free up other ports and all resources */
-       cxgbe_close(adap);
-       return 0;
+       RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device)
+               err |= rte_eth_dev_close(port_id);
+
+       return err == 0 ? 0 : -EIO;
 }
 
 static int eth_cxgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
@@ -1239,14 +1470,9 @@ RTE_PMD_REGISTER_PCI(net_cxgbe, rte_cxgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_cxgbe, cxgb4_pci_tbl);
 RTE_PMD_REGISTER_KMOD_DEP(net_cxgbe, "* igb_uio | uio_pci_generic | vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(net_cxgbe,
-                             CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> ");
-
-RTE_INIT(cxgbe_init_log)
-{
-       cxgbe_logtype = rte_log_register("pmd.net.cxgbe");
-       if (cxgbe_logtype >= 0)
-               rte_log_set_level(cxgbe_logtype, RTE_LOG_NOTICE);
-       cxgbe_mbox_logtype = rte_log_register("pmd.net.cxgbe.mbox");
-       if (cxgbe_mbox_logtype >= 0)
-               rte_log_set_level(cxgbe_mbox_logtype, RTE_LOG_NOTICE);
-}
+                             CXGBE_DEVARG_CMN_KEEP_OVLAN "=<0|1> "
+                             CXGBE_DEVARG_CMN_TX_MODE_LATENCY "=<0|1> "
+                             CXGBE_DEVARG_PF_FILTER_MODE "=<uint32> "
+                             CXGBE_DEVARG_PF_FILTER_MASK "=<uint32> ");
+RTE_LOG_REGISTER_DEFAULT(cxgbe_logtype, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(cxgbe_mbox_logtype, mbox, NOTICE);