net/cxgbe: initialize SGE and queues for VF
author    Kumar Sanghvi <kumaras@chelsio.com>
          Sat, 10 Mar 2018 22:48:23 +0000 (04:18 +0530)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Fri, 30 Mar 2018 12:08:44 +0000 (14:08 +0200)
Query SGE parameters from firmware, initialize them, and enable
queue allocation for the VF.  Also derive the PCIe channel and
queue congestion management settings for the VF.

Signed-off-by: Kumar Sanghvi <kumaras@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
drivers/net/cxgbe/base/adapter.h
drivers/net/cxgbe/base/common.h
drivers/net/cxgbe/base/t4_regs.h
drivers/net/cxgbe/base/t4fw_interface.h
drivers/net/cxgbe/base/t4vf_hw.c
drivers/net/cxgbe/cxgbe_ethdev.c
drivers/net/cxgbe/cxgbe_main.c
drivers/net/cxgbe/cxgbevf_main.c
drivers/net/cxgbe/sge.c

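The heart of the change is that a VF cannot read the SGE configuration
registers directly, so the new FW_PARAMS_MNEM_REG mnemonic is used to
fetch them through the firmware mailbox.  A minimal sketch of that
mechanism, using only t4vf_query_params() and the V_FW_PARAMS_*
encoders declared in the headers below (the helper vf_read_sge_reg()
is hypothetical, for illustration only):

    /* Sketch: read a PF-programmed SGE register from a VF by encoding
     * the register offset into a firmware parameter and querying it
     * over the mailbox.
     */
    static int vf_read_sge_reg(struct adapter *adap, u32 reg_addr, u32 *val)
    {
            u32 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
                        V_FW_PARAMS_PARAM_XYZ(reg_addr);

            return t4vf_query_params(adap, 1, &param, val);
    }

    /* e.g. fetch the SGE control settings programmed by the PF driver */
    ret = vf_read_sge_reg(adap, A_SGE_CONTROL, &sge_control);

t4vf_sge_init() below batches several such reads into one query.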
diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index 6b2fc8b..95752d1 100644
@@ -249,6 +249,7 @@ struct sge_txq {
        unsigned int equeidx;      /* last sent credit request */
        unsigned int last_pidx;    /* last pidx recorded by tx monitor */
        unsigned int last_coal_idx;/* last coal-idx recorded by tx monitor */
+       unsigned int abs_id;       /* absolute SGE id */
 
        int db_disabled;            /* doorbell state */
        unsigned short db_pidx;     /* doorbell producer index */
@@ -719,6 +720,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
                     const struct pkt_gl *gl);
 int t4_sge_init(struct adapter *adap);
+int t4vf_sge_init(struct adapter *adap);
 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                         struct rte_eth_dev *eth_dev, uint16_t queue_id,
                         unsigned int iqid, int socket_id);
diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h
index 3307827..d74903d 100644
@@ -333,6 +333,7 @@ int t4vf_fw_reset(struct adapter *adap);
 int t4_fw_halt(struct adapter *adap, unsigned int mbox, int reset);
 int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset);
 int t4_fl_pkt_align(struct adapter *adap);
+int t4vf_fl_pkt_align(struct adapter *adap, u32 sge_control, u32 sge_control2);
 int t4vf_get_vfres(struct adapter *adap);
 int t4_fixup_host_params_compat(struct adapter *adap, unsigned int page_size,
                                unsigned int cache_line_size,
diff --git a/drivers/net/cxgbe/base/t4_regs.h b/drivers/net/cxgbe/base/t4_regs.h
index f553d14..43d6a0c 100644
@@ -77,6 +77,7 @@
 #define SGE_BASE_ADDR 0x1000
 
 #define A_SGE_PF_KDOORBELL 0x0
+#define A_SGE_VF_KDOORBELL 0x0
 
 #define S_QID    15
 #define M_QID    0x1ffffU
 
 #define A_SGE_PF_GTS 0x4
 
+#define T4VF_SGE_BASE_ADDR 0x0000
+#define A_SGE_VF_GTS 0x4
+
 #define S_INGRESSQID    16
 #define M_INGRESSQID    0xffffU
 #define V_INGRESSQID(x) ((x) << S_INGRESSQID)
 #define V_QUEUESPERPAGEPF0(x) ((x) << S_QUEUESPERPAGEPF0)
 #define G_QUEUESPERPAGEPF0(x) (((x) >> S_QUEUESPERPAGEPF0) & M_QUEUESPERPAGEPF0)
 
+#define A_SGE_EGRESS_QUEUES_PER_PAGE_VF 0x1014
+
 #define S_ERR_CPL_EXCEED_IQE_SIZE    22
 #define V_ERR_CPL_EXCEED_IQE_SIZE(x) ((x) << S_ERR_CPL_EXCEED_IQE_SIZE)
 #define F_ERR_CPL_EXCEED_IQE_SIZE    V_ERR_CPL_EXCEED_IQE_SIZE(1U)
 
 #define A_SGE_CONM_CTRL 0x1094
 
+#define S_T6_EGRTHRESHOLDPACKING    16
+#define M_T6_EGRTHRESHOLDPACKING    0xffU
+#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & \
+                                    M_T6_EGRTHRESHOLDPACKING)
+
 #define S_EGRTHRESHOLD    8
 #define M_EGRTHRESHOLD    0x3fU
 #define V_EGRTHRESHOLD(x) ((x) << S_EGRTHRESHOLD)
 #define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5)
 
 #define A_SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+#define A_SGE_INGRESS_QUEUES_PER_PAGE_VF 0x10f8
 
 #define A_SGE_CONTROL2 0x1124
 
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index b40bfb9..6c5c972 100644
@@ -480,6 +480,7 @@ struct fw_caps_config_cmd {
 enum fw_params_mnem {
        FW_PARAMS_MNEM_DEV              = 1,    /* device params */
        FW_PARAMS_MNEM_PFVF             = 2,    /* function params */
+       FW_PARAMS_MNEM_REG              = 3,    /* limited register access */
        FW_PARAMS_MNEM_DMAQ             = 4,    /* dma queue params */
 };
 
@@ -887,6 +888,11 @@ struct fw_eq_eth_cmd {
 #define G_FW_EQ_ETH_CMD_EQID(x)        \
        (((x) >> S_FW_EQ_ETH_CMD_EQID) & M_FW_EQ_ETH_CMD_EQID)
 
+#define S_FW_EQ_ETH_CMD_PHYSEQID        0
+#define M_FW_EQ_ETH_CMD_PHYSEQID        0xfffff
+#define G_FW_EQ_ETH_CMD_PHYSEQID(x)     \
+       (((x) >> S_FW_EQ_ETH_CMD_PHYSEQID) & M_FW_EQ_ETH_CMD_PHYSEQID)
+
 #define S_FW_EQ_ETH_CMD_FETCHRO                22
 #define M_FW_EQ_ETH_CMD_FETCHRO                0x1
 #define V_FW_EQ_ETH_CMD_FETCHRO(x)     ((x) << S_FW_EQ_ETH_CMD_FETCHRO)
diff --git a/drivers/net/cxgbe/base/t4vf_hw.c b/drivers/net/cxgbe/base/t4vf_hw.c
index 6f222c4..8e48588 100644
@@ -460,6 +460,46 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
        return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
 }
 
+/**
+ * t4vf_fl_pkt_align - return the fl packet alignment
+ * @adapter: the adapter
+ *
+ * T4 has a single field to specify both the packing and padding boundary.
+ * T5 onwards has separate fields for them, so the alignment of the next
+ * packet offset is the maximum of the two.
+ */
+int t4vf_fl_pkt_align(struct adapter *adapter, u32 sge_control,
+                     u32 sge_control2)
+{
+       unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
+
+       /* T4 uses a single control field to specify both the PCIe Padding and
+        * Packing Boundary.  T5 introduced the ability to specify these
+        * separately.  The actual Ingress Packet Data alignment boundary
+        * within Packed Buffer Mode is the maximum of these two
+        * specifications.
+        */
+       if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+               ingpad_shift = X_INGPADBOUNDARY_SHIFT;
+       else
+               ingpad_shift = X_T6_INGPADBOUNDARY_SHIFT;
+
+       ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) + ingpad_shift);
+
+       fl_align = ingpadboundary;
+       if (!is_t4(adapter->params.chip)) {
+               ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
+               if (ingpackboundary == X_INGPACKBOUNDARY_16B)
+                       ingpackboundary = 16;
+               else
+                       ingpackboundary = 1 << (ingpackboundary +
+                                       X_INGPACKBOUNDARY_SHIFT);
+
+               fl_align = max(ingpadboundary, ingpackboundary);
+       }
+       return fl_align;
+}
+
 unsigned int t4vf_get_pf_from_vf(struct adapter *adapter)
 {
        u32 whoami;
diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c
index 6d4654b..ef0a3f2 100644
@@ -482,9 +482,8 @@ int cxgbe_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        err = t4_sge_alloc_eth_txq(adapter, txq, eth_dev, queue_idx,
                                   s->fw_evtq.cntxt_id, socket_id);
 
-       dev_debug(adapter, "%s: txq->q.cntxt_id= %d err = %d\n",
-                 __func__, txq->q.cntxt_id, err);
-
+       dev_debug(adapter, "%s: txq->q.cntxt_id= %u txq->q.abs_id= %u err = %d\n",
+                 __func__, txq->q.cntxt_id, txq->q.abs_id, err);
        return err;
 }
 
@@ -611,11 +610,13 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
        err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
                               &rxq->fl, t4_ethrx_handler,
-                              t4_get_tp_ch_map(adapter, pi->tx_chan), mp,
+                              is_pf4(adapter) ?
+                              t4_get_tp_ch_map(adapter, pi->tx_chan) : 0, mp,
                               queue_idx, socket_id);
 
-       dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u\n",
-                 __func__, err, pi->port_id, rxq->rspq.cntxt_id);
+       dev_debug(adapter, "%s: err = %d; port_id = %d; cntxt_id = %u; abs_id = %u\n",
+                 __func__, err, pi->port_id, rxq->rspq.cntxt_id,
+                 rxq->rspq.abs_id);
        return err;
 }
 
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index feb494b..8e5cee5 100644
@@ -1068,7 +1068,8 @@ int setup_rss(struct port_info *pi)
 static void enable_rx(struct adapter *adap, struct sge_rspq *q)
 {
        /* 0-increment GTS to start the timer and enable interrupts */
-       t4_write_reg(adap, MYPF_REG(A_SGE_PF_GTS),
+       t4_write_reg(adap, is_pf4(adap) ? MYPF_REG(A_SGE_PF_GTS) :
+                                         T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS,
                     V_SEINTARM(q->intr_params) |
                     V_INGRESSQID(q->cntxt_id));
 }
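The PF reaches its copy of the GTS register through its per-function
register window, while a VF sees the SGE queue registers at the very
start of its own BAR, which is why T4VF_SGE_BASE_ADDR is 0x0000.  A
sketch of the resulting offsets, from the defines added to t4_regs.h
above (BAR0-relative on the VF side):

    /* PF: MYPF_REG(A_SGE_PF_GTS)            -> per-PF window + 0x4
     * VF: T4VF_SGE_BASE_ADDR + A_SGE_VF_GTS -> 0x0000 + 0x4 = 0x4
     *
     * The egress doorbell follows the same pattern:
     * VF: T4VF_SGE_BASE_ADDR + A_SGE_VF_KDOORBELL = 0x0
     */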
diff --git a/drivers/net/cxgbe/cxgbevf_main.c b/drivers/net/cxgbe/cxgbevf_main.c
index 0624267..f4d0f4d 100644
@@ -108,6 +108,11 @@ static int adap_init0vf(struct adapter *adapter)
        }
 
        adapter->pf = t4vf_get_pf_from_vf(adapter);
+       err = t4vf_sge_init(adapter);
+       if (err) {
+               dev_err(adapter, "error in sge init\n");
+               return err;
+       }
 
        /* If we're running on newer firmware, let it know that we're
         * prepared to deal with encapsulated CPL messages.  Older
diff --git a/drivers/net/cxgbe/sge.c b/drivers/net/cxgbe/sge.c
index 6ff8bc4..aba1a49 100644
@@ -1689,6 +1689,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
        char z_name[RTE_MEMZONE_NAMESIZE];
        char z_name_sw[RTE_MEMZONE_NAMESIZE];
        unsigned int nb_refill;
+       u8 pciechan;
 
        /* Size needs to be multiple of 16, including status entry. */
        iq->size = cxgbe_roundup(iq->size, 16);
@@ -1706,8 +1707,19 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 
        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
-                           F_FW_CMD_WRITE | F_FW_CMD_EXEC |
-                           V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));
+                           F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+
+       if (is_pf4(adap)) {
+               pciechan = cong > 0 ? cxgbe_ffs(cong) - 1 : pi->tx_chan;
+               c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
+                                    V_FW_IQ_CMD_VFN(0));
+               if (cong >= 0)
+                       c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+                                                   F_FW_IQ_CMD_IQRO);
+       } else {
+               pciechan = pi->port_id;
+       }
+
        c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
                                 (sizeof(c) / 16));
        c.type_to_iqandstindex =
@@ -1719,16 +1731,12 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                      V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
                                                               -intr_idx - 1));
        c.iqdroprss_to_iqesize =
-               htons(V_FW_IQ_CMD_IQPCIECH(cong > 0 ? cxgbe_ffs(cong) - 1 :
-                                                     pi->tx_chan) |
+               htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
                      F_FW_IQ_CMD_IQGTSMODE |
                      V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
                      V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
        c.iqsize = htons(iq->size);
        c.iqaddr = cpu_to_be64(iq->phys_addr);
-       if (cong >= 0)
-               c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
-                                           F_FW_IQ_CMD_IQRO);
 
        if (fl) {
                struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
@@ -1768,7 +1776,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                               0 : F_FW_IQ_CMD_FL0PACKEN) |
                              F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
                              F_FW_IQ_CMD_FL0PADEN);
-               if (cong >= 0)
+               if (is_pf4(adap) && cong >= 0)
                        c.iqns_to_fl0congen |=
                                htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
                                      F_FW_IQ_CMD_FL0CONGCIF |
@@ -1789,7 +1797,10 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
                c.fl0addr = cpu_to_be64(fl->addr);
        }
 
-       ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+       if (is_pf4(adap))
+               ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+       else
+               ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
        if (ret)
                goto err;
 
@@ -1806,7 +1817,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
        iq->stat = (void *)&iq->desc[iq->size * 8];
        iq->eth_dev = eth_dev;
        iq->handler = hnd;
-       iq->port_id = pi->port_id;
+       iq->port_id = pi->pidx;
        iq->mb_pool = mp;
 
        /* set offset to -1 to distinguish ingress queues without FL */
@@ -1846,7 +1857,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
         * a lot easier to fix in one place ...  For now we do something very
         * simple (and hopefully less wrong).
         */
-       if (!is_t4(adap->params.chip) && cong >= 0) {
+       if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
                u32 param, val;
                int i;
 
@@ -1893,9 +1904,11 @@ err:
        return ret;
 }
 
-static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id,
+                    unsigned int abs_id)
 {
        q->cntxt_id = id;
+       q->abs_id = abs_id;
        q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
                                    &q->bar2_qid);
        q->cidx = 0;
@@ -1943,6 +1956,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
        struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
        char z_name[RTE_MEMZONE_NAMESIZE];
        char z_name_sw[RTE_MEMZONE_NAMESIZE];
+       u8 pciechan;
 
        /* Add status entries */
        nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
@@ -1961,16 +1975,22 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
 
        memset(&c, 0, sizeof(c));
        c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
-                           F_FW_CMD_WRITE | F_FW_CMD_EXEC |
-                           V_FW_EQ_ETH_CMD_PFN(adap->pf) |
-                           V_FW_EQ_ETH_CMD_VFN(0));
+                           F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+       if (is_pf4(adap)) {
+               pciechan = pi->tx_chan;
+               c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) |
+                                    V_FW_EQ_ETH_CMD_VFN(0));
+       } else {
+               pciechan = pi->port_id;
+       }
+
        c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
                                 F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
        c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
                                     V_FW_EQ_ETH_CMD_VIID(pi->viid));
        c.fetchszm_to_iqid =
                htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
-                     V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+                     V_FW_EQ_ETH_CMD_PCIECHN(pciechan) |
                      F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
        c.dcaen_to_eqsize =
                htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
@@ -1978,7 +1998,10 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                      V_FW_EQ_ETH_CMD_EQSIZE(nentries));
        c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);
 
-       ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+       if (is_pf4(adap))
+               ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+       else
+               ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
        if (ret) {
                rte_free(txq->q.sdesc);
                txq->q.sdesc = NULL;
@@ -1986,7 +2009,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
                return ret;
        }
 
-       init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)));
+       init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)),
+                G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd)));
        txq->stats.tso = 0;
        txq->stats.pkts = 0;
        txq->stats.tx_cso = 0;
@@ -2281,3 +2305,182 @@ int t4_sge_init(struct adapter *adap)
 
        return 0;
 }
+
+int t4vf_sge_init(struct adapter *adap)
+{
+       struct sge_params *sge_params = &adap->params.sge;
+       u32 sge_ingress_queues_per_page;
+       u32 sge_egress_queues_per_page;
+       u32 sge_control, sge_control2;
+       u32 fl_small_pg, fl_large_pg;
+       u32 sge_ingress_rx_threshold;
+       u32 sge_timer_value_0_and_1;
+       u32 sge_timer_value_2_and_3;
+       u32 sge_timer_value_4_and_5;
+       u32 sge_congestion_control;
+       struct sge *s = &adap->sge;
+       unsigned int s_hps, s_qpp;
+       u32 sge_host_page_size;
+       u32 params[7], vals[7];
+       int v;
+
+       /* query basic params from fw */
+       params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
+       params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
+       params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0));
+       params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1));
+       params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
+       params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
+       params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
+       v = t4vf_query_params(adap, 7, params, vals);
+       if (v != FW_SUCCESS)
+               return v;
+
+       sge_control = vals[0];
+       sge_host_page_size = vals[1];
+       fl_small_pg = vals[2];
+       fl_large_pg = vals[3];
+       sge_timer_value_0_and_1 = vals[4];
+       sge_timer_value_2_and_3 = vals[5];
+       sge_timer_value_4_and_5 = vals[6];
+
+       /*
+        * Start by vetting the basic SGE parameters which have been set up by
+        * the Physical Function Driver.
+        */
+
+       /* We only bother using the Large Page logic if the Large Page Buffer
+        * is larger than our Page Size Buffer.
+        */
+       if (fl_large_pg <= fl_small_pg)
+               fl_large_pg = 0;
+
+       /* The Page Size Buffer must be exactly equal to our Page Size and the
+        * Large Page Size Buffer should be 0 (per above) or a power of 2.
+        */
+       if (fl_small_pg != CXGBE_PAGE_SIZE ||
+           (fl_large_pg & (fl_large_pg - 1)) != 0) {
+               dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
+                       fl_small_pg, fl_large_pg);
+               return -EINVAL;
+       }
+
+       if ((sge_control & F_RXPKTCPLMODE) !=
+           V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+               dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
+               return -EINVAL;
+       }
+
+
+       /* Grab ingress packing boundary from SGE_CONTROL2 */
+       params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
+       v = t4vf_query_params(adap, 1, params, vals);
+       if (v != FW_SUCCESS) {
+               dev_err(adapter, "Unable to get SGE Control2; "
+                       "probably old firmware.\n");
+               return v;
+       }
+       sge_control2 = vals[0];
+
+       params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
+       params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
+       v = t4vf_query_params(adap, 2, params, vals);
+       if (v != FW_SUCCESS)
+               return v;
+       sge_ingress_rx_threshold = vals[0];
+       sge_congestion_control = vals[1];
+       params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
+       params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+                    V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
+       v = t4vf_query_params(adap, 2, params, vals);
+       if (v != FW_SUCCESS) {
+               dev_warn(adap, "Unable to get VF SGE Queues/Page; "
+                        "probably old firmware.\n");
+               return v;
+       }
+       sge_egress_queues_per_page = vals[0];
+       sge_ingress_queues_per_page = vals[1];
+
+       /*
+        * We need the Queues/Page for our VF.  This is based on the
+        * PF from which we're instantiated and is indexed in the
+        * register we just read.
+        */
+       s_hps = (S_HOSTPAGESIZEPF0 +
+                (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf);
+       sge_params->hps =
+               ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0);
+
+       s_qpp = (S_QUEUESPERPAGEPF0 +
+                (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf);
+       sge_params->eq_qpp =
+               ((sge_egress_queues_per_page >> s_qpp)
+                & M_QUEUESPERPAGEPF0);
+       sge_params->iq_qpp =
+               ((sge_ingress_queues_per_page >> s_qpp)
+                & M_QUEUESPERPAGEPF0);
+
+       /*
+        * Now translate the queried parameters into our internal forms.
+        */
+       if (fl_large_pg)
+               s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+       s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE)
+                       ? 128 : 64);
+       s->pktshift = G_PKTSHIFT(sge_control);
+       s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2);
+
+       /*
+        * A FL with <= fl_starve_thres buffers is starving and a periodic
+        * timer will attempt to refill it.  This needs to be larger than the
+        * SGE's Egress Congestion Threshold.  If it isn't, then we can get
+        * stuck waiting for new packets while the SGE is waiting for us to
+        * give it more Free List entries.  (Note that the SGE's Egress
+        * Congestion Threshold is in units of 2 Free List pointers.)
+        */
+       switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+       case CHELSIO_T5:
+               s->fl_starve_thres =
+                       G_EGRTHRESHOLDPACKING(sge_congestion_control);
+               break;
+       case CHELSIO_T6:
+       default:
+               s->fl_starve_thres =
+                       G_T6_EGRTHRESHOLDPACKING(sge_congestion_control);
+               break;
+       }
+       s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
+
+       /*
+        * Save RX interrupt holdoff timer values and counter
+        * threshold values from the SGE parameters.
+        */
+       s->timer_val[0] = core_ticks_to_us(adap,
+                       G_TIMERVALUE0(sge_timer_value_0_and_1));
+       s->timer_val[1] = core_ticks_to_us(adap,
+                       G_TIMERVALUE1(sge_timer_value_0_and_1));
+       s->timer_val[2] = core_ticks_to_us(adap,
+                       G_TIMERVALUE2(sge_timer_value_2_and_3));
+       s->timer_val[3] = core_ticks_to_us(adap,
+                       G_TIMERVALUE3(sge_timer_value_2_and_3));
+       s->timer_val[4] = core_ticks_to_us(adap,
+                       G_TIMERVALUE4(sge_timer_value_4_and_5));
+       s->timer_val[5] = core_ticks_to_us(adap,
+                       G_TIMERVALUE5(sge_timer_value_4_and_5));
+       s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold);
+       s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold);
+       s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold);
+       s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold);
+       return 0;
+}
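Two worked examples for the parameters derived above, assuming the
usual 4-bit-per-PF field layout in t4_regs.h (S_QUEUESPERPAGEPF0 = 0,
S_QUEUESPERPAGEPF1 = 4, M_QUEUESPERPAGEPF0 = 0xf) and hypothetical
register contents:

    /* Queues/Page: the VF inherits the value of its parent PF, so for
     * adap->pf = 2:
     *   s_qpp  = 0 + (4 - 0) * 2 = 8
     *   eq_qpp = (sge_egress_queues_per_page >> 8) & 0xf
     * and 1 << eq_qpp egress queues then share one host page.
     *
     * FL starvation threshold on T6:
     *   G_T6_EGRTHRESHOLDPACKING(sge_congestion_control) = 8
     *   fl_starve_thres = 8 * 2 + 1 = 17
     * The congestion threshold is in units of 2 free-list pointers,
     * and the +1 keeps the refill trigger strictly above the SGE's
     * egress congestion threshold.
     */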