-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2014-2015 Chelsio Communications.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Chelsio Communications nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Chelsio Communications.
+ * All rights reserved.
*/
-#include <linux/if_ether.h>
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_atomic.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
-#include "common.h"
-#include "t4_regs.h"
-#include "t4_msg.h"
+#include "base/common.h"
+#include "base/t4_regs.h"
+#include "base/t4_msg.h"
#include "cxgbe.h"
static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
/*
* Max number of Rx buffers we replenish at a time.
*/
-#define MAX_RX_REFILL 16U
+#define MAX_RX_REFILL 64U
#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
*/
#define MAX_IMM_TX_PKT_LEN 256
+/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
/*
* Rx buffer sizes for "usembufs" Free List buffers (one ingress packet
* per mbuf buffer). We currently only support two sizes for 1500- and
{
struct sge *s = &adapter->sge;
- return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
+ return CXGBE_ALIGN(s->pktshift + RTE_ETHER_HDR_LEN + VLAN_HLEN + mtu,
+ s->fl_align);
}
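+/*
+ * Example: with pktshift = 2 and fl_align = 64 (typical values), an MTU of
+ * 1500 gives CXGBE_ALIGN(2 + 14 + 4 + 1500, 64) = 1536 bytes per FL buffer.
+ */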
#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
struct rte_mbuf *m = mbuf;
for (; m; m = m->next, addr++) {
- *addr = m->buf_physaddr + rte_pktmbuf_headroom(m);
+ *addr = m->buf_iova + rte_pktmbuf_headroom(m);
if (*addr == 0)
goto out_err;
}
static void reclaim_tx_desc(struct sge_txq *q, unsigned int n)
{
+ struct tx_sw_desc *d;
unsigned int cidx = q->cidx;
+ d = &q->sdesc[cidx];
while (n--) {
- if (++cidx == q->size)
+ if (d->mbuf) { /* an SGL is present */
+ rte_pktmbuf_free(d->mbuf);
+ d->mbuf = NULL;
+ }
+ ++d;
+ if (++cidx == q->size) {
cidx = 0;
+ d = q->sdesc;
+ }
}
q->cidx = cidx;
}
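+/*
+ * The low-order bits of each Free List DMA address encode the buffer size
+ * index ORed in at refill time; decode them here to recover the byte size.
+ */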
static inline unsigned int get_buf_size(struct adapter *adapter,
const struct rx_sw_desc *d)
{
- struct sge *s = &adapter->sge;
unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
- unsigned int buf_size;
+ unsigned int buf_size = 0;
switch (rx_buf_size_idx) {
- case RX_SMALL_PG_BUF:
- buf_size = PAGE_SIZE;
- break;
-
- case RX_LARGE_PG_BUF:
- buf_size = PAGE_SIZE << s->fl_pg_order;
- break;
-
case RX_SMALL_MTU_BUF:
buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
break;
default:
BUG_ON(1);
- buf_size = 0; /* deal with bogus compiler warnings */
- /* NOTREACHED */
+ /* NOT REACHED */
}
return buf_size;
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
- if (q->pend_cred >= 8) {
+ if (q->pend_cred >= 64) {
u32 val = adap->params.arch.sge_fl_db;
if (is_t4(adap->params.chip))
* mechanism.
*/
if (unlikely(!q->bar2_addr)) {
- t4_write_reg(adap, MYPF_REG(A_SGE_PF_KDOORBELL),
- val | V_QID(q->cntxt_id));
+ u32 reg = is_pf4(adap) ? MYPF_REG(A_SGE_PF_KDOORBELL) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_KDOORBELL;
+
+ t4_write_reg_relaxed(adap, reg,
+ val | V_QID(q->cntxt_id));
} else {
- writel(val | V_QID(q->bar2_qid),
- (void *)((uintptr_t)q->bar2_addr +
- SGE_UDB_KDOORBELL));
+ writel_relaxed(val | V_QID(q->bar2_qid),
+ (void *)((uintptr_t)q->bar2_addr +
+ SGE_UDB_KDOORBELL));
/*
* This Write memory Barrier will force the write to
}
}
-static inline struct rte_mbuf *cxgbe_rxmbuf_alloc(struct rte_mempool *mp)
-{
- struct rte_mbuf *m;
-
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return m;
-}
-
static inline void set_rx_sw_desc(struct rx_sw_desc *sd, void *buf,
dma_addr_t mapping)
{
__be64 *d = &q->desc[q->pidx];
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
unsigned int buf_size_idx = RX_SMALL_MTU_BUF;
+ struct rte_mbuf *buf_bulk[n];
+ int ret, i;
+ struct rte_pktmbuf_pool_private *mbp_priv;
+ u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+ /* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
+ mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
+ if (jumbo_en &&
+ ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
+ buf_size_idx = RX_LARGE_MTU_BUF;
+
+ ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
+ if (unlikely(ret != 0)) {
+ dev_debug(adap, "%s: failed to allocate fl entries in bulk\n",
+ __func__);
+ q->alloc_failed++;
+ rxq->rspq.eth_dev->data->rx_mbuf_alloc_failed++;
+ goto out;
+ }
- while (n--) {
- struct rte_mbuf *mbuf = cxgbe_rxmbuf_alloc(rxq->rspq.mb_pool);
+ for (i = 0; i < n; i++) {
+ struct rte_mbuf *mbuf = buf_bulk[i];
dma_addr_t mapping;
if (!mbuf) {
goto out;
}
- mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ rte_mbuf_refcnt_set(mbuf, 1);
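+ /* Align the start of packet data beyond the headroom to the SGE
+ * Free List alignment, so the DMA address handed to hardware is
+ * fl_align-aligned.
+ */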
+ mbuf->data_off =
+ (uint16_t)((char *)
+ RTE_PTR_ALIGN((char *)mbuf->buf_addr +
+ RTE_PKTMBUF_HEADROOM,
+ adap->sge.fl_align) -
+ (char *)mbuf->buf_addr);
mbuf->next = NULL;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->rspq.port_id;
- mapping = (dma_addr_t)(mbuf->buf_physaddr + mbuf->data_off);
-
+ mapping = (dma_addr_t)RTE_ALIGN(mbuf->buf_iova +
+ mbuf->data_off,
+ adap->sge.fl_align);
mapping |= buf_size_idx;
*d++ = cpu_to_be64(mapping);
set_rx_sw_desc(sd, mbuf, mapping);
/**
* calc_tx_flits - calculate the number of flits for a packet Tx WR
* @m: the packet
+ * @adap: adapter structure pointer
*
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_flits(const struct rte_mbuf *m)
+static inline unsigned int calc_tx_flits(const struct rte_mbuf *m,
+ struct adapter *adap)
{
+ size_t wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkt_wr) :
+ sizeof(struct fw_eth_tx_pkt_vm_wr);
unsigned int flits;
int hdrlen;
* Write Header (incorporated as part of the cpl_tx_pkt_lso and
* cpl_tx_pkt structures), followed by either a TX Packet Write CPL
* message or, if we're doing a Large Send Offload, an LSO CPL message
- * with an embeded TX Packet Write CPL message.
+ * with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(m->nb_segs);
if (m->tso_segsz)
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
+ flits += (wr_size + sizeof(struct cpl_tx_pkt_lso_core) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
else
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
+ flits += (wr_size +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
return flits;
}
((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
#define Q_IDXDIFF(q, idx) IDXDIFF((q)->pidx, (q)->idx, (q)->size)
+#define R_IDXDIFF(q, idx) IDXDIFF((q)->cidx, (q)->idx, (q)->size)
+
+#define PIDXDIFF(head, tail, wrap) \
+ ((tail) >= (head) ? (tail) - (head) : (wrap) - (head) + (tail))
+#define P_IDXDIFF(q, idx) PIDXDIFF((q)->cidx, idx, (q)->size)
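+/* P_IDXDIFF(q, idx): how far the given producer index is ahead of the
+ * queue's consumer index, accounting for ring wrap-around.
+ */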
/**
* ring_tx_db - ring a Tx queue's doorbell
}
if (likely(csum_type >= TX_CSUM_TCPIP)) {
- int hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
+ u64 hdr_len = V_TXPKT_IPHDR_LEN(m->l3_len);
int eth_hdr_len = m->l2_len;
if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
struct adapter *adap = (struct adapter *)data;
struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
int i;
+ unsigned int coal_idx;
/* monitor any pending tx */
for (i = 0; i < adap->sge.max_ethqsets; i++, txq++) {
- t4_os_lock(&txq->txq_lock);
- if (txq->q.coalesce.idx) {
- if (txq->q.coalesce.idx == txq->q.last_coal_idx &&
- txq->q.pidx == txq->q.last_pidx) {
- ship_tx_pkt_coalesce_wr(adap, txq);
- } else {
- txq->q.last_coal_idx = txq->q.coalesce.idx;
- txq->q.last_pidx = txq->q.pidx;
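+ /* Use trylock so a contended queue is simply skipped; the 50us
+ * alarm re-armed below will retry it on the next tick.
+ */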
+ if (t4_os_trylock(&txq->txq_lock)) {
+ coal_idx = txq->q.coalesce.idx;
+ if (coal_idx) {
+ if (coal_idx == txq->q.last_coal_idx &&
+ txq->q.pidx == txq->q.last_pidx) {
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ } else {
+ txq->q.last_coal_idx = coal_idx;
+ txq->q.last_pidx = txq->q.pidx;
+ }
}
+ t4_os_unlock(&txq->txq_lock);
}
- t4_os_unlock(&txq->txq_lock);
}
rte_eal_alarm_set(50, tx_timer_cb, (void *)adap);
}
static inline void ship_tx_pkt_coalesce_wr(struct adapter *adap,
struct sge_eth_txq *txq)
{
- u32 wr_mid;
- struct sge_txq *q = &txq->q;
+ struct fw_eth_tx_pkts_vm_wr *vmwr;
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
struct fw_eth_tx_pkts_wr *wr;
+ struct sge_txq *q = &txq->q;
unsigned int ndesc;
+ u32 wr_mid;
/* fill the pkts WR header */
wr = (void *)&q->desc[q->pidx];
- wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_WR));
+ vmwr = (void *)&q->desc[q->pidx];
wr_mid = V_FW_WR_LEN16(DIV_ROUND_UP(q->coalesce.flits, 2));
ndesc = flits_to_desc(q->coalesce.flits);
wr->plen = cpu_to_be16(q->coalesce.len);
wr->npkt = q->coalesce.idx;
wr->r3 = 0;
- wr->type = q->coalesce.type;
+ if (is_pf4(adap)) {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
+ wr->type = q->coalesce.type;
+ } else {
+ wr->op_pkd = htonl(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR));
+ vmwr->r4 = 0;
+ memcpy((void *)vmwr->ethmacdst, (void *)q->coalesce.ethmacdst,
+ fw_hdr_copy_len);
+ }
/* zero out coalesce structure members */
- q->coalesce.idx = 0;
- q->coalesce.flits = 0;
- q->coalesce.len = 0;
+ memset((void *)&q->coalesce, 0, sizeof(struct eth_coalesce));
txq_advance(q, ndesc);
txq->stats.coal_wr++;
unsigned int *nflits,
struct adapter *adap)
{
+ struct fw_eth_tx_pkts_vm_wr *wr;
+ const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
+ sizeof(wr->ethmacsrc) +
+ sizeof(wr->ethtype) +
+ sizeof(wr->vlantci));
struct sge_txq *q = &txq->q;
unsigned int flits, ndesc;
unsigned char type = 0;
- int credits, hw_cidx = ntohs(q->stat->cidx);
- int in_use = q->pidx - hw_cidx + flits_to_desc(q->coalesce.flits);
+ int credits, wr_size;
/* use coal WR type 1 when no frags are present */
type = (mbuf->nb_segs == 1) ? 1 : 0;
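+ /* VF coalesced WRs (FW_ETH_TX_PKTS_VM_WR) carry a single Ethernet
+ * header that applies to every packet in the WR and support only
+ * type-1 (unfragmented) packets, so flush the pending WR whenever
+ * the header changes.
+ */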
+ if (!is_pf4(adap)) {
+ if (!type)
+ return 0;
- if (in_use < 0)
- in_use += q->size;
+ if (q->coalesce.idx && memcmp((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *),
+ fw_hdr_copy_len))
+ ship_tx_pkt_coalesce_wr(adap, txq);
+ }
if (unlikely(type != q->coalesce.type && q->coalesce.idx))
ship_tx_pkt_coalesce_wr(adap, txq);
new:
/* start a new pkts WR, the WR header is not filled below */
- flits += sizeof(struct fw_eth_tx_pkts_wr) / sizeof(__be64);
+ wr_size = is_pf4(adap) ? sizeof(struct fw_eth_tx_pkts_wr) :
+ sizeof(struct fw_eth_tx_pkts_vm_wr);
+ flits += wr_size / sizeof(__be64);
ndesc = flits_to_desc(q->coalesce.flits + flits);
credits = txq_avail(q) - ndesc;
if (unlikely(credits < 0 || wraps_around(q, ndesc)))
return 0;
- q->coalesce.flits += 2;
+ q->coalesce.flits += wr_size / sizeof(__be64);
q->coalesce.type = type;
q->coalesce.ptr = (unsigned char *)&q->desc[q->pidx] +
- 2 * sizeof(__be64);
+ q->coalesce.flits * sizeof(__be64);
+ if (!is_pf4(adap))
+ memcpy((void *)q->coalesce.ethmacdst,
+ rte_pktmbuf_mtod(mbuf, void *), fw_hdr_copy_len);
return 1;
}
struct rte_mbuf *mbuf,
int flits, struct adapter *adap,
const struct port_info *pi,
- dma_addr_t *addr)
+ dma_addr_t *addr, uint16_t nb_pkts)
{
u64 cntrl, *end;
struct sge_txq *q = &txq->q;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(mbuf->vlan_tci);
}
- cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) |
- V_TXPKT_PF(adap->pf));
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id));
cpl->pack = htons(0);
cpl->len = htons(len);
cpl->ctrl1 = cpu_to_be64(cntrl);
sd->coalesce.sgl[idx & 1] = (struct ulptx_sgl *)(cpl + 1);
sd->coalesce.idx = (idx & 1) + 1;
- /* send the coaelsced work request if max reached */
- if (++q->coalesce.idx == ETH_COALESCE_PKT_NUM)
+ /* Send the coalesced work request only if the max is reached.
+ * However, if lower latency is preferred over throughput, don't
+ * wait to coalesce the next Tx burst; send the packets now.
+ */
+ q->coalesce.idx++;
+ if (q->coalesce.idx == adap->params.max_tx_coalesce_num ||
+ (adap->devargs.tx_mode_latency && q->coalesce.idx >= nb_pkts))
ship_tx_pkt_coalesce_wr(adap, txq);
+
return 0;
}
*
* Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
*/
-int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf)
+int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
+ uint16_t nb_pkts)
{
const struct port_info *pi;
struct cpl_tx_pkt_lso_core *lso;
struct adapter *adap;
struct rte_mbuf *m = mbuf;
struct fw_eth_tx_pkt_wr *wr;
+ struct fw_eth_tx_pkt_vm_wr *vmwr;
struct cpl_tx_pkt_core *cpl;
struct tx_sw_desc *d;
dma_addr_t addr[m->nb_segs];
u32 wr_mid;
u64 cntrl, *end;
bool v6;
+ u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
/* Reject xmit if queue is stopped */
if (unlikely(txq->flags & EQ_STOPPED))
* The chip min packet length is 10 octets but play safe and reject
* anything shorter than an Ethernet header.
*/
- if (unlikely(m->pkt_len < ETHER_HDR_LEN)) {
+ if (unlikely(m->pkt_len < RTE_ETHER_HDR_LEN)) {
out_free:
rte_pktmbuf_free(m);
return 0;
}
- rte_prefetch0(&((&txq->q)->sdesc->mbuf->pool));
- pi = (struct port_info *)txq->eth_dev->data->dev_private;
+ if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
+ (unlikely(m->pkt_len > max_pkt_len)))
+ goto out_free;
+
+ pi = txq->data->dev_private;
adap = pi->adapter;
cntrl = F_TXPKT_L4CSUM_DIS | F_TXPKT_IPCSUM_DIS;
/* align the end of coalesce WR to a 512 byte boundary */
txq->q.coalesce.max = (8 - (txq->q.pidx & 7)) * 8;
- if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+ if (!((m->ol_flags & PKT_TX_TCP_SEG) ||
+ m->pkt_len > RTE_ETHER_MAX_LEN)) {
if (should_tx_packet_coalesce(txq, mbuf, &cflits, adap)) {
if (unlikely(map_mbuf(mbuf, addr) < 0)) {
dev_warn(adap, "%s: mapping err for coalesce\n",
goto out_free;
}
return tx_do_packet_coalesce(txq, mbuf, cflits, adap,
- pi, addr);
+ pi, addr, nb_pkts);
} else {
return -EBUSY;
}
if (txq->q.coalesce.idx)
ship_tx_pkt_coalesce_wr(adap, txq);
- flits = calc_tx_flits(m);
+ flits = calc_tx_flits(m, adap);
ndesc = flits_to_desc(flits);
credits = txq_avail(&txq->q) - ndesc;
}
wr = (void *)&txq->q.desc[txq->q.pidx];
+ vmwr = (void *)&txq->q.desc[txq->q.pidx];
wr->equiq_to_len16 = htonl(wr_mid);
- wr->r3 = rte_cpu_to_be_64(0);
- end = (u64 *)wr + flits;
+ if (is_pf4(adap)) {
+ wr->r3 = rte_cpu_to_be_64(0);
+ end = (u64 *)wr + flits;
+ } else {
+ const size_t fw_hdr_copy_len = (sizeof(vmwr->ethmacdst) +
+ sizeof(vmwr->ethmacsrc) +
+ sizeof(vmwr->ethtype) +
+ sizeof(vmwr->vlantci));
+
+ vmwr->r3[0] = rte_cpu_to_be_32(0);
+ vmwr->r3[1] = rte_cpu_to_be_32(0);
+ memcpy((void *)vmwr->ethmacdst, rte_pktmbuf_mtod(m, void *),
+ fw_hdr_copy_len);
+ end = (u64 *)vmwr + flits;
+ }
len = 0;
len += sizeof(*cpl);
- lso = (void *)(wr + 1);
- v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
- l3hdr_len = m->l3_len;
- l4hdr_len = m->l4_len;
- eth_xtra_len = m->l2_len - ETHER_HDR_LEN;
- len += sizeof(*lso);
- wr->op_immdlen = htonl(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
- V_FW_WR_IMMDLEN(len));
- lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
- F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
- V_LSO_IPV6(v6) |
- V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
- V_LSO_IPHDR_LEN(l3hdr_len / 4) |
- V_LSO_TCPHDR_LEN(l4hdr_len / 4));
- lso->ipid_ofst = htons(0);
- lso->mss = htons(m->tso_segsz);
- lso->seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->len = htonl(m->pkt_len);
- else
- lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
- cpl = (void *)(lso + 1);
- cntrl = V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- V_TXPKT_IPHDR_LEN(l3hdr_len) |
- V_TXPKT_ETHHDR_LEN(eth_xtra_len);
- txq->stats.tso++;
- txq->stats.tx_cso += m->tso_segsz;
+
+ /* Coalescing skipped and we send through normal path */
+ if (!(m->ol_flags & PKT_TX_TCP_SEG)) {
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
+ V_FW_WR_IMMDLEN(len));
+ if (is_pf4(adap))
+ cpl = (void *)(wr + 1);
+ else
+ cpl = (void *)(vmwr + 1);
+ if (m->ol_flags & PKT_TX_IP_CKSUM) {
+ cntrl = hwcsum(adap->params.chip, m) |
+ F_TXPKT_IPCSUM_DIS;
+ txq->stats.tx_cso++;
+ }
+ } else {
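+ /* TSO path: emit an LSO CPL describing how hardware should
+ * segment the payload, followed by the usual TX Packet CPL.
+ */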
+ if (is_pf4(adap))
+ lso = (void *)(wr + 1);
+ else
+ lso = (void *)(vmwr + 1);
+ v6 = (m->ol_flags & PKT_TX_IPV6) != 0;
+ l3hdr_len = m->l3_len;
+ l4hdr_len = m->l4_len;
+ eth_xtra_len = m->l2_len - RTE_ETHER_HDR_LEN;
+ len += sizeof(*lso);
+ wr->op_immdlen = htonl(V_FW_WR_OP(is_pf4(adap) ?
+ FW_ETH_TX_PKT_WR :
+ FW_ETH_TX_PKT_VM_WR) |
+ V_FW_WR_IMMDLEN(len));
+ lso->lso_ctrl = htonl(V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+ F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+ V_LSO_IPV6(v6) |
+ V_LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+ V_LSO_IPHDR_LEN(l3hdr_len / 4) |
+ V_LSO_TCPHDR_LEN(l4hdr_len / 4));
+ lso->ipid_ofst = htons(0);
+ lso->mss = htons(m->tso_segsz);
+ lso->seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->len = htonl(m->pkt_len);
+ else
+ lso->len = htonl(V_LSO_T5_XFER_SIZE(m->pkt_len));
+ cpl = (void *)(lso + 1);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ cntrl = V_TXPKT_ETHHDR_LEN(eth_xtra_len);
+ else
+ cntrl = V_T6_TXPKT_ETHHDR_LEN(eth_xtra_len);
+
+ cntrl |= V_TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 :
+ TX_CSUM_TCPIP) |
+ V_TXPKT_IPHDR_LEN(l3hdr_len);
+ txq->stats.tso++;
+ txq->stats.tx_cso += m->tso_segsz;
+ }
if (m->ol_flags & PKT_TX_VLAN_PKT) {
txq->stats.vlan_ins++;
cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->vlan_tci);
}
- cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) |
- V_TXPKT_PF(adap->pf));
+ cpl->ctrl0 = htonl(V_TXPKT_OPCODE(CPL_TX_PKT_XT));
+ if (is_pf4(adap))
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->tx_chan) |
+ V_TXPKT_PF(adap->pf));
+ else
+ cpl->ctrl0 |= htonl(V_TXPKT_INTF(pi->port_id) |
+ V_TXPKT_PF(0));
+
cpl->pack = htons(0);
cpl->len = htons(m->pkt_len);
cpl->ctrl1 = cpu_to_be64(cntrl);
last_desc -= txq->q.size;
d = &txq->q.sdesc[last_desc];
- if (d->mbuf) {
- rte_pktmbuf_free(d->mbuf);
- d->mbuf = NULL;
+ if (d->coalesce.idx) {
+ int i;
+
+ for (i = 0; i < d->coalesce.idx; i++) {
+ rte_pktmbuf_free(d->coalesce.mbuf[i]);
+ d->coalesce.mbuf[i] = NULL;
+ }
+ d->coalesce.idx = 0;
}
write_sgl(m, &txq->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
addr);
return 0;
}
+/**
+ * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ * @q: the SGE control Tx queue
+ *
+ * This is a variant of reclaim_completed_tx() that is used for Tx queues
+ * that send only immediate data (presently just the control queues) and
+ * thus do not have any mbufs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+ int hw_cidx = ntohs(q->stat->cidx);
+ int reclaim = hw_cidx - q->cidx;
+
+ if (reclaim < 0)
+ reclaim += q->size;
+
+ q->in_use -= reclaim;
+ q->cidx = hw_cidx;
+}
+
+/**
+ * is_imm - check whether a packet can be sent as immediate data
+ * @mbuf: the packet
+ *
+ * Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct rte_mbuf *mbuf)
+{
+ return mbuf->pkt_len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ * inline_tx_mbuf - inline a packet's data into Tx descriptors
+ * @q: the TX queue where the packet will be inlined
+ * @from: pointer to data portion of packet
+ * @to: pointer after cpl where data has to be inlined
+ * @len: length of data to inline
+ *
+ * Inline a packet's contents directly to TX descriptors, starting at
+ * the given position within the TX DMA ring.
+ * Most of the complexity of this operation is dealing with wrap-arounds
+ * in the middle of the packet we want to inline.
+ */
+static void inline_tx_mbuf(const struct sge_txq *q, caddr_t from, caddr_t *to,
+ int len)
+{
+ int left = RTE_PTR_DIFF(q->stat, *to);
+
+ if (likely((uintptr_t)*to + len <= (uintptr_t)q->stat)) {
+ rte_memcpy(*to, from, len);
+ *to = RTE_PTR_ADD(*to, len);
+ } else {
+ rte_memcpy(*to, from, left);
+ from = RTE_PTR_ADD(from, left);
+ left = len - left;
+ rte_memcpy((void *)q->desc, from, left);
+ *to = RTE_PTR_ADD((void *)q->desc, left);
+ }
+}
+
+/**
+ * ctrl_xmit - send a packet through an SGE control Tx queue
+ * @q: the control queue
+ * @mbuf: the packet
+ *
+ * Send a packet through an SGE control Tx queue. Packets sent through
+ * a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ unsigned int ndesc;
+ struct fw_wr_hdr *wr;
+ caddr_t dst;
+
+ if (unlikely(!is_imm(mbuf))) {
+ WARN_ON(1);
+ rte_pktmbuf_free(mbuf);
+ return -1;
+ }
+
+ reclaim_completed_tx_imm(&q->q);
+ ndesc = DIV_ROUND_UP(mbuf->pkt_len, sizeof(struct tx_desc));
+ t4_os_lock(&q->ctrlq_lock);
+
+ q->full = txq_avail(&q->q) < ndesc ? 1 : 0;
+ if (unlikely(q->full)) {
+ t4_os_unlock(&q->ctrlq_lock);
+ return -1;
+ }
+
+ wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+ dst = (void *)wr;
+ inline_tx_mbuf(&q->q, rte_pktmbuf_mtod(mbuf, caddr_t),
+ &dst, mbuf->data_len);
+
+ txq_advance(&q->q, ndesc);
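+ /* When free descriptors run low, request an egress-queue update
+ * from hardware so we learn when credits are returned.
+ */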
+ if (unlikely(txq_avail(&q->q) < 64))
+ wr->lo |= htonl(F_FW_WR_EQUEQ);
+
+ q->txp++;
+
+ ring_tx_db(q->adapter, &q->q);
+ t4_os_unlock(&q->ctrlq_lock);
+
+ rte_pktmbuf_free(mbuf);
+ return 0;
+}
+
+/**
+ * t4_mgmt_tx - send a management message
+ * @q: the control queue
+ * @mbuf: the packet containing the management message
+ *
+ * Send a management message through the control queue.
+ */
+int t4_mgmt_tx(struct sge_ctrl_txq *q, struct rte_mbuf *mbuf)
+{
+ return ctrl_xmit(q, mbuf);
+}
+
/**
* alloc_ring - allocate resources for an SGE descriptor ring
* @dev: the PCI device's core device
* handle the maximum ring size is allocated in order to allow for
* resizing in later calls to the queue setup function.
*/
- tz = rte_memzone_reserve_aligned(z_name, len, socket_id, 0, 4096);
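+ /* The descriptor ring is DMAed by hardware, so its memory must be
+ * IOVA-contiguous.
+ */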
+ tz = rte_memzone_reserve_aligned(z_name, len, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, 4096);
if (!tz)
return NULL;
if (metadata)
*(void **)metadata = s;
- *phys = (uint64_t)tz->phys_addr;
+ *phys = (uint64_t)tz->iova;
return tz->addr;
}
-/**
- * t4_pktgl_to_mbuf_usembufs - build an mbuf from a packet gather list
- * @gl: the gather list
- *
- * Builds an mbuf from the given packet gather list. Returns the mbuf or
- * %NULL if mbuf allocation failed.
- */
-static struct rte_mbuf *t4_pktgl_to_mbuf_usembufs(const struct pkt_gl *gl)
-{
- /*
- * If there's only one mbuf fragment, just return that.
- */
- if (likely(gl->nfrags == 1))
- return gl->mbufs[0];
-
- return NULL;
-}
-
-/**
- * t4_pktgl_to_mbuf - build an mbuf from a packet gather list
- * @gl: the gather list
- *
- * Builds an mbuf from the given packet gather list. Returns the mbuf or
- * %NULL if mbuf allocation failed.
- */
-static struct rte_mbuf *t4_pktgl_to_mbuf(const struct pkt_gl *gl)
-{
- return t4_pktgl_to_mbuf_usembufs(gl);
-}
-
-#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
- ((dma_addr_t) ((mb)->buf_physaddr + (mb)->data_off))
-
-/**
- * t4_ethrx_handler - process an ingress ethernet packet
- * @q: the response queue that received the packet
- * @rsp: the response queue descriptor holding the RX_PKT message
- * @si: the gather list of packet fragments
- *
- * Process an ingress ethernet packet and deliver it to the stack.
- */
-int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
- const struct pkt_gl *si)
-{
- struct rte_mbuf *mbuf;
- const struct cpl_rx_pkt *pkt;
- const struct rss_header *rss_hdr;
- bool csum_ok;
- struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
-
- rss_hdr = (const void *)rsp;
- pkt = (const void *)&rsp[1];
- csum_ok = pkt->csum_calc && !pkt->err_vec;
-
- mbuf = t4_pktgl_to_mbuf(si);
- if (unlikely(!mbuf)) {
- rxq->stats.rx_drops++;
- return 0;
- }
-
- mbuf->port = pkt->iff;
- if (pkt->l2info & htonl(F_RXF_IP)) {
- mbuf->ol_flags |= PKT_RX_IPV4_HDR;
- if (unlikely(!csum_ok))
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
- if ((pkt->l2info & htonl(F_RXF_UDP | F_RXF_TCP)) && !csum_ok)
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
- } else if (pkt->l2info & htonl(F_RXF_IP6)) {
- mbuf->ol_flags |= PKT_RX_IPV6_HDR;
- }
-
- mbuf->port = pkt->iff;
-
- if (!rss_hdr->filter_tid && rss_hdr->hash_type) {
- mbuf->ol_flags |= PKT_RX_RSS_HASH;
- mbuf->hash.rss = ntohl(rss_hdr->hash_val);
- }
-
- if (pkt->vlan_ex) {
- mbuf->ol_flags |= PKT_RX_VLAN_PKT;
- mbuf->vlan_tci = ntohs(pkt->vlan);
- }
- rxq->stats.pkts++;
- rxq->stats.rx_bytes += mbuf->pkt_len;
-
- return 0;
-}
-
-/**
- * restore_rx_bufs - put back a packet's Rx buffers
- * @q: the SGE free list
- * @frags: number of FL buffers to restore
- *
- * Puts back on an FL the Rx buffers. The buffers have already been
- * unmapped and are left unmapped, we mark them so to prevent further
- * unmapping attempts.
- *
- * This function undoes a series of @unmap_rx_buf calls when we find out
- * that the current packet can't be processed right away afterall and we
- * need to come back to it later. This is a very rare event and there's
- * no effort to make this particularly efficient.
- */
-static void restore_rx_bufs(struct sge_fl *q, int frags)
-{
- while (frags--) {
- if (q->cidx == 0)
- q->cidx = q->size - 1;
- else
- q->cidx--;
- q->avail++;
- }
-}
-
-/**
- * is_new_response - check if a response is newly written
- * @r: the response descriptor
- * @q: the response queue
- *
- * Returns true if a response descriptor contains a yet unprocessed
- * response.
- */
-static inline bool is_new_response(const struct rsp_ctrl *r,
- const struct sge_rspq *q)
-{
- return (r->u.type_gen >> S_RSPD_GEN) == q->gen;
-}
-
#define CXGB4_MSG_AN ((void *)1)
/**
}
}
+static inline void cxgbe_set_mbuf_info(struct rte_mbuf *pkt, uint32_t ptype,
+ uint64_t ol_flags)
+{
+ pkt->packet_type |= ptype;
+ pkt->ol_flags |= ol_flags;
+}
+
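+/* Translate the CPL_RX_PKT header into mbuf packet type and Rx offload
+ * flags, taking the T6 compressed error-vector format into account.
+ */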
+static inline void cxgbe_fill_mbuf_info(struct adapter *adap,
+ const struct cpl_rx_pkt *cpl,
+ struct rte_mbuf *pkt)
+{
+ bool csum_ok;
+ u16 err_vec;
+
+ if (adap->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
+ else
+ err_vec = ntohs(cpl->err_vec);
+
+ csum_ok = cpl->csum_calc && !err_vec;
+
+ if (cpl->vlan_ex)
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER_VLAN,
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ else
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L2_ETHER, 0);
+
+ if (cpl->l2info & htonl(F_RXF_IP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV4,
+ csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+ PKT_RX_IP_CKSUM_BAD);
+ else if (cpl->l2info & htonl(F_RXF_IP6))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L3_IPV6,
+ csum_ok ? PKT_RX_IP_CKSUM_GOOD :
+ PKT_RX_IP_CKSUM_BAD);
+
+ if (cpl->l2info & htonl(F_RXF_TCP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_TCP,
+ csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+ PKT_RX_L4_CKSUM_BAD);
+ else if (cpl->l2info & htonl(F_RXF_UDP))
+ cxgbe_set_mbuf_info(pkt, RTE_PTYPE_L4_UDP,
+ csum_ok ? PKT_RX_L4_CKSUM_GOOD :
+ PKT_RX_L4_CKSUM_BAD);
+}
+
/**
* process_responses - process responses from an SGE response queue
* @q: the ingress queue to process
int budget_left = budget;
const struct rsp_ctrl *rc;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
- struct adapter *adapter = q->adapter;
while (likely(budget_left)) {
+ if (q->cidx == ntohs(q->stat->pidx))
+ break;
+
rc = (const struct rsp_ctrl *)
((const char *)q->cur_desc + (q->iqe_len - sizeof(*rc)));
- if (!is_new_response(rc, q))
- break;
-
/*
* Ensure response has been read
*/
rsp_type = G_RSPD_TYPE(rc->u.type_gen);
if (likely(rsp_type == X_RSPD_TYPE_FLBUF)) {
- struct pkt_gl si;
- const struct rx_sw_desc *rsd;
- struct rte_mbuf *pkt = NULL;
- u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
-
- si.usembufs = rxq->usembufs;
- /*
- * In "use mbufs" mode, we don't pack multiple
- * ingress packets per buffer (mbuf) so we
- * should _always_ get a "New Buffer" flags
- * from the SGE. Also, since we hand the
- * mbuf's up to the host stack for it to
- * eventually free, we don't release the mbuf's
- * in the driver (in contrast to the "packed
- * page" mode where the driver needs to
- * release its reference on the page buffers).
- */
- BUG_ON(!(len & F_RSPD_NEWBUF));
- len = G_RSPD_LEN(len);
- si.tot_len = len;
-
- /* gather packet fragments */
- for (frags = 0; len; frags++) {
- rsd = &rxq->fl.sdesc[rxq->fl.cidx];
- bufsz = min(get_buf_size(adapter, rsd), len);
+ struct sge *s = &q->adapter->sge;
+ unsigned int stat_pidx;
+ int stat_pidx_diff;
+
+ stat_pidx = ntohs(q->stat->pidx);
+ stat_pidx_diff = P_IDXDIFF(q, stat_pidx);
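+ /* stat_pidx_diff is the number of responses the
+ * hardware has posted beyond our consumer index.
+ */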
+ while (stat_pidx_diff && budget_left) {
+ const struct rx_sw_desc *rsd =
+ &rxq->fl.sdesc[rxq->fl.cidx];
+ const struct rss_header *rss_hdr =
+ (const void *)q->cur_desc;
+ const struct cpl_rx_pkt *cpl =
+ (const void *)&q->cur_desc[1];
+ struct rte_mbuf *pkt, *npkt;
+ u32 len, bufsz;
+
+ rc = (const struct rsp_ctrl *)
+ ((const char *)q->cur_desc +
+ (q->iqe_len - sizeof(*rc)));
+
+ rsp_type = G_RSPD_TYPE(rc->u.type_gen);
+ if (unlikely(rsp_type != X_RSPD_TYPE_FLBUF))
+ break;
+
+ len = ntohl(rc->pldbuflen_qid);
+ BUG_ON(!(len & F_RSPD_NEWBUF));
pkt = rsd->buf;
- pkt->data_len = bufsz;
- pkt->pkt_len = bufsz;
- si.mbufs[frags] = pkt;
- len -= bufsz;
- unmap_rx_buf(&rxq->fl);
- }
-
- si.va = RTE_PTR_ADD(si.mbufs[0]->buf_addr,
- si.mbufs[0]->data_off);
- rte_prefetch1(si.va);
-
- /*
- * For the "use mbuf" case here, we can end up
- * chewing through our Free List very rapidly
- * with one entry per Ingress packet getting
- * consumed. So if the handler() successfully
- * consumed the mbuf, check to see if we can
- * refill the Free List incrementally in the
- * loop ...
- */
- si.nfrags = frags;
- ret = q->handler(q, q->cur_desc, &si);
-
- if (unlikely(ret != 0)) {
- restore_rx_bufs(&rxq->fl, frags);
- } else {
+ npkt = pkt;
+ len = G_RSPD_LEN(len);
+ pkt->pkt_len = len;
+
+ /* Chain additional mbufs to cover the full packet length if needed */
+ while (len) {
+ struct rte_mbuf *new_pkt = rsd->buf;
+
+ bufsz = min(get_buf_size(q->adapter,
+ rsd), len);
+ new_pkt->data_len = bufsz;
+ unmap_rx_buf(&rxq->fl);
+ len -= bufsz;
+ npkt->next = new_pkt;
+ npkt = new_pkt;
+ pkt->nb_segs++;
+ rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+ }
+ npkt->next = NULL;
+ pkt->nb_segs--;
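+ /* The first loop iteration counted the head
+ * mbuf as a segment of itself; undo it.
+ */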
+
+ cxgbe_fill_mbuf_info(q->adapter, cpl, pkt);
+
+ if (!rss_hdr->filter_tid &&
+ rss_hdr->hash_type) {
+ pkt->ol_flags |= PKT_RX_RSS_HASH;
+ pkt->hash.rss =
+ ntohl(rss_hdr->hash_val);
+ }
+
+ if (cpl->vlan_ex)
+ pkt->vlan_tci = ntohs(cpl->vlan);
+
+ rte_pktmbuf_adj(pkt, s->pktshift);
+ rxq->stats.pkts++;
+ rxq->stats.rx_bytes += pkt->pkt_len;
rx_pkts[budget - budget_left] = pkt;
- if (fl_cap(&rxq->fl) - rxq->fl.avail >= 8)
- __refill_fl(q->adapter, &rxq->fl);
- }
+ rspq_next(q);
+ budget_left--;
+ stat_pidx_diff--;
+ }
+ continue;
} else if (likely(rsp_type == X_RSPD_TYPE_CPL)) {
ret = q->handler(q, q->cur_desc, NULL);
} else {
* refill the Free List.
*/
- if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 8)
+ if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
__refill_fl(q->adapter, &rxq->fl);
return budget - budget_left;
int cxgbe_poll(struct sge_rspq *q, struct rte_mbuf **rx_pkts,
unsigned int budget, unsigned int *work_done)
{
+ struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ unsigned int cidx_inc;
unsigned int params;
u32 val;
- int err = 0;
*work_done = process_responses(q, budget, rx_pkts);
- params = V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX);
- q->next_intr_params = params;
- val = V_CIDXINC(*work_done) | V_SEINTARM(params);
if (*work_done) {
- /*
- * If we don't have access to the new User GTS (T5+),
- * use the old doorbell mechanism; otherwise use the new
- * BAR2 mechanism.
- */
- if (unlikely(!q->bar2_addr))
- t4_write_reg(q->adapter, MYPF_REG(A_SGE_PF_GTS),
+ cidx_inc = R_IDXDIFF(q, gts_idx);
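+ /* Entries consumed since the last GTS doorbell update */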
+
+ if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 64)
+ __refill_fl(q->adapter, &rxq->fl);
+
+ params = q->intr_params;
+ q->next_intr_params = params;
+ val = V_CIDXINC(cidx_inc) | V_SEINTARM(params);
+
+ if (unlikely(!q->bar2_addr)) {
+ u32 reg = is_pf4(q->adapter) ? MYPF_REG(A_SGE_PF_GTS) :
+ T4VF_SGE_BASE_ADDR +
+ A_SGE_VF_GTS;
+
+ t4_write_reg(q->adapter, reg,
val | V_INGRESSQID((u32)q->cntxt_id));
- else {
+ } else {
writel(val | V_INGRESSQID(q->bar2_qid),
- (void *)((uintptr_t)q->bar2_addr +
- SGE_UDB_GTS));
- /*
- * This Write memory Barrier will force the write to
- * the User Doorbell area to be flushed.
+ (void *)((uintptr_t)q->bar2_addr + SGE_UDB_GTS));
+ /* This Write memory Barrier will force the
+ * write to the User Doorbell area to be
+ * flushed.
*/
wmb();
}
+ q->gts_idx = q->cidx;
}
-
- return err;
+ return 0;
}
/**
int ret, flsz = 0;
struct fw_iq_cmd c;
struct sge *s = &adap->sge;
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
unsigned int nb_refill;
+ u8 pciechan;
/* Size needs to be multiple of 16, including status entry. */
- iq->size = roundup(iq->size, 16);
+ iq->size = cxgbe_roundup(iq->size, 16);
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.name, fwevtq ? "fwq_ring" : "rx_ring",
- eth_dev->data->port_id, queue_id);
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id,
+ fwevtq ? "fwq_ring" : "rx_ring");
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
iq->desc = alloc_ring(iq->size, iq->iqe_len, 0, &iq->phys_addr, NULL, 0,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_WRITE | F_FW_CMD_EXEC |
- V_FW_IQ_CMD_PFN(adap->pf) | V_FW_IQ_CMD_VFN(0));
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+
+ if (is_pf4(adap)) {
+ pciechan = pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_IQ_CMD_PFN(adap->pf) |
+ V_FW_IQ_CMD_VFN(0));
+ if (cong >= 0)
+ c.iqns_to_fl0congen =
+ htonl(F_FW_IQ_CMD_IQFLINTCONGEN |
+ V_FW_IQ_CMD_IQTYPE(cong ?
+ FW_IQ_IQTYPE_NIC :
+ FW_IQ_IQTYPE_OFLD) |
+ F_FW_IQ_CMD_IQRO);
+ } else {
+ pciechan = pi->port_id;
+ }
+
c.alloc_to_len16 = htonl(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART |
(sizeof(c) / 16));
c.type_to_iqandstindex =
V_FW_IQ_CMD_IQASYNCH(fwevtq) |
V_FW_IQ_CMD_VIID(pi->viid) |
V_FW_IQ_CMD_IQANDST(intr_idx < 0) |
- V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT) |
+ V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_STATUS_PAGE) |
V_FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
-intr_idx - 1));
c.iqdroprss_to_iqesize =
- htons(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ htons(V_FW_IQ_CMD_IQPCIECH(pciechan) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
c.iqsize = htons(iq->size);
c.iqaddr = cpu_to_be64(iq->phys_addr);
- if (cong >= 0)
- c.iqns_to_fl0congen = htonl(F_FW_IQ_CMD_IQFLINTCONGEN);
if (fl) {
struct sge_eth_rxq *rxq = container_of(fl, struct sge_eth_rxq,
fl);
- enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
/*
* Allocate the ring for the hardware free list (with space
*/
if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
fl->size = s->fl_starve_thres - 1 + 2 * 8;
- fl->size = roundup(fl->size, 8);
+ fl->size = cxgbe_roundup(fl->size, 8);
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.name,
- fwevtq ? "fwq_ring" : "fl_ring",
- eth_dev->data->port_id, queue_id);
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id,
+ fwevtq ? "fwq_ring" : "fl_ring");
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
fl->desc = alloc_ring(fl->size, sizeof(__be64),
0 : F_FW_IQ_CMD_FL0PACKEN) |
F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO |
F_FW_IQ_CMD_FL0PADEN);
- if (cong >= 0)
+ if (is_pf4(adap) && cong >= 0)
c.iqns_to_fl0congen |=
htonl(V_FW_IQ_CMD_FL0CNGCHMAP(cong) |
F_FW_IQ_CMD_FL0CONGCIF |
* Hence maximum allowed burst size will be 448 bytes.
*/
c.fl0dcaen_to_fl0cidxfthresh =
- htons(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) |
- V_FW_IQ_CMD_FL0FBMAX((chip <= CHELSIO_T5) ?
- X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B));
+ htons(V_FW_IQ_CMD_FL0FBMIN(chip_ver <= CHELSIO_T5 ?
+ X_FETCHBURSTMIN_128B :
+ X_FETCHBURSTMIN_64B) |
+ V_FW_IQ_CMD_FL0FBMAX(chip_ver <= CHELSIO_T5 ?
+ X_FETCHBURSTMAX_512B :
+ X_FETCHBURSTMAX_256B));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
- ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
if (ret)
goto err;
iq->cur_desc = iq->desc;
iq->cidx = 0;
+ iq->gts_idx = 0;
iq->gen = 1;
iq->next_intr_params = iq->intr_params;
iq->cntxt_id = ntohs(c.iqid);
iq->bar2_addr = bar2_address(adap, iq->cntxt_id, T4_BAR2_QTYPE_INGRESS,
&iq->bar2_qid);
iq->size--; /* subtract status entry */
+ iq->stat = (void *)&iq->desc[iq->size * 8];
iq->eth_dev = eth_dev;
iq->handler = hnd;
+ iq->port_id = pi->pidx;
iq->mb_pool = mp;
/* set offset to -1 to distinguish ingress queues without FL */
* a lot easier to fix in one place ... For now we do something very
* simple (and hopefully less wrong).
*/
- if (!is_t4(adap->params.chip) && cong >= 0) {
+ if (is_pf4(adap) && !is_t4(adap->params.chip) && cong >= 0) {
u32 param, val;
int i;
refill_fl_err:
t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
- iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff);
+ iq->cntxt_id, fl->cntxt_id, 0xffff);
fl_nomem:
ret = -ENOMEM;
err:
return ret;
}
-static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id,
+ unsigned int abs_id)
{
q->cntxt_id = id;
+ q->abs_id = abs_id;
q->bar2_addr = bar2_address(adap, q->cntxt_id, T4_BAR2_QTYPE_EGRESS,
&q->bar2_qid);
q->cidx = 0;
int ret, nentries;
struct fw_eq_eth_cmd c;
struct sge *s = &adap->sge;
- struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private);
+ struct port_info *pi = eth_dev->data->dev_private;
char z_name[RTE_MEMZONE_NAMESIZE];
char z_name_sw[RTE_MEMZONE_NAMESIZE];
+ u8 pciechan;
/* Add status entries */
nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
- snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- eth_dev->driver->pci_drv.name, "tx_ring",
- eth_dev->data->port_id, queue_id);
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id, "tx_ring");
snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
- F_FW_CMD_WRITE | F_FW_CMD_EXEC |
- V_FW_EQ_ETH_CMD_PFN(adap->pf) |
- V_FW_EQ_ETH_CMD_VFN(0));
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC);
+ if (is_pf4(adap)) {
+ pciechan = pi->tx_chan;
+ c.op_to_vfn |= htonl(V_FW_EQ_ETH_CMD_PFN(adap->pf) |
+ V_FW_EQ_ETH_CMD_VFN(0));
+ } else {
+ pciechan = pi->port_id;
+ }
+
c.alloc_to_len16 = htonl(F_FW_EQ_ETH_CMD_ALLOC |
F_FW_EQ_ETH_CMD_EQSTART | (sizeof(c) / 16));
c.autoequiqe_to_viid = htonl(F_FW_EQ_ETH_CMD_AUTOEQUEQE |
V_FW_EQ_ETH_CMD_VIID(pi->viid));
c.fetchszm_to_iqid =
htonl(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+ V_FW_EQ_ETH_CMD_PCIECHN(pciechan) |
F_FW_EQ_ETH_CMD_FETCHRO | V_FW_EQ_ETH_CMD_IQID(iqid));
c.dcaen_to_eqsize =
htonl(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
V_FW_EQ_ETH_CMD_EQSIZE(nentries));
c.eqaddr = rte_cpu_to_be_64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (is_pf4(adap))
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ else
+ ret = t4vf_wr_mbox(adap, &c, sizeof(c), &c);
if (ret) {
rte_free(txq->q.sdesc);
txq->q.sdesc = NULL;
return ret;
}
- init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)));
+ init_txq(adap, &txq->q, G_FW_EQ_ETH_CMD_EQID(ntohl(c.eqid_pkd)),
+ G_FW_EQ_ETH_CMD_PHYSEQID(ntohl(c.physeqid_pkd)));
txq->stats.tso = 0;
txq->stats.pkts = 0;
txq->stats.tx_cso = 0;
txq->stats.mapping_err = 0;
txq->flags |= EQ_STOPPED;
txq->eth_dev = eth_dev;
+ txq->data = eth_dev->data;
t4_os_lock_init(&txq->txq_lock);
return 0;
}
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+ struct rte_eth_dev *eth_dev, uint16_t queue_id,
+ unsigned int iqid, int socket_id)
+{
+ int ret, nentries;
+ struct fw_eq_ctrl_cmd c;
+ struct sge *s = &adap->sge;
+ struct port_info *pi = eth_dev->data->dev_private;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+ char z_name_sw[RTE_MEMZONE_NAMESIZE];
+
+ /* Add status entries */
+ nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
+
+ snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ eth_dev->data->port_id, queue_id, "ctrl_tx_ring");
+ snprintf(z_name_sw, sizeof(z_name_sw), "%s_sw_ring", z_name);
+
+ txq->q.desc = alloc_ring(txq->q.size, sizeof(struct tx_desc),
+ 0, &txq->q.phys_addr,
+ NULL, 0, queue_id,
+ socket_id, z_name, z_name_sw);
+ if (!txq->q.desc)
+ return -ENOMEM;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
+ F_FW_CMD_WRITE | F_FW_CMD_EXEC |
+ V_FW_EQ_CTRL_CMD_PFN(adap->pf) |
+ V_FW_EQ_CTRL_CMD_VFN(0));
+ c.alloc_to_len16 = htonl(F_FW_EQ_CTRL_CMD_ALLOC |
+ F_FW_EQ_CTRL_CMD_EQSTART | (sizeof(c) / 16));
+ c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(0));
+ c.physeqid_pkd = htonl(0);
+ c.fetchszm_to_iqid =
+ htonl(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
+ V_FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+ F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(iqid));
+ c.dcaen_to_eqsize =
+ htonl(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
+ V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) |
+ V_FW_EQ_CTRL_CMD_EQSIZE(nentries));
+ c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
+ if (ret) {
+ txq->q.desc = NULL;
+ return ret;
+ }
+
+ init_txq(adap, &txq->q, G_FW_EQ_CTRL_CMD_EQID(ntohl(c.cmpliqid_eqid)),
+ G_FW_EQ_CTRL_CMD_EQID(ntohl(c.physeqid_pkd)));
+ txq->adapter = adap;
+ txq->full = 0;
+ return 0;
+}
+
static void free_txq(struct sge_txq *q)
{
q->cntxt_id = 0;
*/
void t4_free_sge_resources(struct adapter *adap)
{
- int i;
+ unsigned int i;
struct sge_eth_rxq *rxq = &adap->sge.ethrxq[0];
struct sge_eth_txq *txq = &adap->sge.ethtxq[0];
}
}
+ /* clean up control Tx queues */
+ for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+ struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+ if (cq->q.desc) {
+ reclaim_completed_tx_imm(&cq->q);
+ t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
+ cq->q.cntxt_id);
+ free_txq(&cq->q);
+ }
+ }
+
if (adap->sge.fw_evtq.desc)
free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
}
* The Page Size Buffer must be exactly equal to our Page Size and the
* Large Page Size Buffer should be 0 (per above) or a power of 2.
*/
- if (fl_small_pg != PAGE_SIZE ||
+ if (fl_small_pg != CXGBE_PAGE_SIZE ||
(fl_large_pg & (fl_large_pg - 1)) != 0) {
dev_err(adap, "bad SGE FL page buffer sizes [%d, %d]\n",
fl_small_pg, fl_large_pg);
int t4_sge_init(struct adapter *adap)
{
struct sge *s = &adap->sge;
- u32 sge_control, sge_control2, sge_conm_ctrl;
- unsigned int ingpadboundary, ingpackboundary;
+ u32 sge_control, sge_conm_ctrl;
int ret, egress_threshold;
/*
sge_control = t4_read_reg(adap, A_SGE_CONTROL);
s->pktshift = G_PKTSHIFT(sge_control);
s->stat_len = (sge_control & F_EGRSTATUSPAGESIZE) ? 128 : 64;
-
- /*
- * T4 uses a single control field to specify both the PCIe Padding and
- * Packing Boundary. T5 introduced the ability to specify these
- * separately. The actual Ingress Packet Data alignment boundary
- * within Packed Buffer Mode is the maximum of these two
- * specifications.
- */
- ingpadboundary = 1 << (G_INGPADBOUNDARY(sge_control) +
- X_INGPADBOUNDARY_SHIFT);
- s->fl_align = ingpadboundary;
-
- if (!is_t4(adap->params.chip) && !adap->use_unpacked_mode) {
- /*
- * T5 has a weird interpretation of one of the PCIe Packing
- * Boundary values. No idea why ...
- */
- sge_control2 = t4_read_reg(adap, A_SGE_CONTROL2);
- ingpackboundary = G_INGPACKBOUNDARY(sge_control2);
- if (ingpackboundary == X_INGPACKBOUNDARY_16B)
- ingpackboundary = 16;
- else
- ingpackboundary = 1 << (ingpackboundary +
- X_INGPACKBOUNDARY_SHIFT);
-
- s->fl_align = max(ingpadboundary, ingpackboundary);
- }
-
+ s->fl_align = t4_fl_pkt_align(adap);
ret = t4_sge_init_soft(adap);
if (ret < 0) {
dev_err(adap, "%s: t4_sge_init_soft failed, error %d\n",
return 0;
}
+
+int t4vf_sge_init(struct adapter *adap)
+{
+ struct sge_params *sge_params = &adap->params.sge;
+ u32 sge_ingress_queues_per_page;
+ u32 sge_egress_queues_per_page;
+ u32 sge_control, sge_control2;
+ u32 fl_small_pg, fl_large_pg;
+ u32 sge_ingress_rx_threshold;
+ u32 sge_timer_value_0_and_1;
+ u32 sge_timer_value_2_and_3;
+ u32 sge_timer_value_4_and_5;
+ u32 sge_congestion_control;
+ struct sge *s = &adap->sge;
+ unsigned int s_hps, s_qpp;
+ u32 sge_host_page_size;
+ u32 params[7], vals[7];
+ int v;
+
+ /* query basic params from fw */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_HOST_PAGE_SIZE));
+ params[2] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE0));
+ params[3] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_FL_BUFFER_SIZE1));
+ params[4] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_0_AND_1));
+ params[5] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_2_AND_3));
+ params[6] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_TIMER_VALUE_4_AND_5));
+ v = t4vf_query_params(adap, 7, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+
+ sge_control = vals[0];
+ sge_host_page_size = vals[1];
+ fl_small_pg = vals[2];
+ fl_large_pg = vals[3];
+ sge_timer_value_0_and_1 = vals[4];
+ sge_timer_value_2_and_3 = vals[5];
+ sge_timer_value_4_and_5 = vals[6];
+
+ /*
+ * Start by vetting the basic SGE parameters which have been set up by
+ * the Physical Function Driver.
+ */
+
+ /* We only bother using the Large Page logic if the Large Page Buffer
+ * is larger than our Page Size Buffer.
+ */
+ if (fl_large_pg <= fl_small_pg)
+ fl_large_pg = 0;
+
+ /* The Page Size Buffer must be exactly equal to our Page Size and the
+ * Large Page Size Buffer should be 0 (per above) or a power of 2.
+ */
+ if (fl_small_pg != CXGBE_PAGE_SIZE ||
+ (fl_large_pg & (fl_large_pg - 1)) != 0) {
+ dev_err(adap, "bad SGE FL buffer sizes [%d, %d]\n",
+ fl_small_pg, fl_large_pg);
+ return -EINVAL;
+ }
+
+ if ((sge_control & F_RXPKTCPLMODE) !=
+ V_RXPKTCPLMODE(X_RXPKTCPLMODE_SPLIT)) {
+ dev_err(adap, "bad SGE CPL MODE\n");
+ return -EINVAL;
+ }
+
+ /* Grab ingress packing boundary from SGE_CONTROL2 for T5 and later. */
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONTROL2));
+ v = t4vf_query_params(adap, 1, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_err(adap, "Unable to get SGE Control2; probably old firmware.\n");
+ return v;
+ }
+ sge_control2 = vals[0];
+
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_RX_THRESHOLD));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_CONM_CTRL));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS)
+ return v;
+ sge_ingress_rx_threshold = vals[0];
+ sge_congestion_control = vals[1];
+ params[0] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_EGRESS_QUEUES_PER_PAGE_VF));
+ params[1] = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
+ V_FW_PARAMS_PARAM_XYZ(A_SGE_INGRESS_QUEUES_PER_PAGE_VF));
+ v = t4vf_query_params(adap, 2, params, vals);
+ if (v != FW_SUCCESS) {
+ dev_warn(adap, "Unable to get VF SGE Queues/Page; "
+ "probably old firmware.\n");
+ return v;
+ }
+ sge_egress_queues_per_page = vals[0];
+ sge_ingress_queues_per_page = vals[1];
+
+ /*
+ * We need the Queues/Page for our VF. This is based on the
+ * PF from which we're instantiated and is indexed in the
+ * register we just read.
+ */
+ s_hps = (S_HOSTPAGESIZEPF0 +
+ (S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * adap->pf);
+ sge_params->hps =
+ ((sge_host_page_size >> s_hps) & M_HOSTPAGESIZEPF0);
+
+ s_qpp = (S_QUEUESPERPAGEPF0 +
+ (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * adap->pf);
+ sge_params->eq_qpp =
+ ((sge_egress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+ sge_params->iq_qpp =
+ ((sge_ingress_queues_per_page >> s_qpp)
+ & M_QUEUESPERPAGEPF0);
+
+ /*
+ * Now translate the queried parameters into our internal forms.
+ */
+ if (fl_large_pg)
+ s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
+ s->stat_len = ((sge_control & F_EGRSTATUSPAGESIZE)
+ ? 128 : 64);
+ s->pktshift = G_PKTSHIFT(sge_control);
+ s->fl_align = t4vf_fl_pkt_align(adap, sge_control, sge_control2);
+
+ /*
+ * A FL with <= fl_starve_thres buffers is starving and a periodic
+ * timer will attempt to refill it. This needs to be larger than the
+ * SGE's Egress Congestion Threshold. If it isn't, then we can get
+ * stuck waiting for new packets while the SGE is waiting for us to
+ * give it more Free List entries. (Note that the SGE's Egress
+ * Congestion Threshold is in units of 2 Free List pointers.)
+ */
+ switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+ case CHELSIO_T5:
+ s->fl_starve_thres =
+ G_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ case CHELSIO_T6:
+ default:
+ s->fl_starve_thres =
+ G_T6_EGRTHRESHOLDPACKING(sge_congestion_control);
+ break;
+ }
+ s->fl_starve_thres = s->fl_starve_thres * 2 + 1;
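+ /* The hardware threshold is in units of 2 Free List pointers;
+ * +1 keeps the starvation threshold strictly above it.
+ */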
+
+ /*
+ * Save RX interrupt holdoff timer values and counter
+ * threshold values from the SGE parameters.
+ */
+ s->timer_val[0] = core_ticks_to_us(adap,
+ G_TIMERVALUE0(sge_timer_value_0_and_1));
+ s->timer_val[1] = core_ticks_to_us(adap,
+ G_TIMERVALUE1(sge_timer_value_0_and_1));
+ s->timer_val[2] = core_ticks_to_us(adap,
+ G_TIMERVALUE2(sge_timer_value_2_and_3));
+ s->timer_val[3] = core_ticks_to_us(adap,
+ G_TIMERVALUE3(sge_timer_value_2_and_3));
+ s->timer_val[4] = core_ticks_to_us(adap,
+ G_TIMERVALUE4(sge_timer_value_4_and_5));
+ s->timer_val[5] = core_ticks_to_us(adap,
+ G_TIMERVALUE5(sge_timer_value_4_and_5));
+ s->counter_val[0] = G_THRESHOLD_0(sge_ingress_rx_threshold);
+ s->counter_val[1] = G_THRESHOLD_1(sge_ingress_rx_threshold);
+ s->counter_val[2] = G_THRESHOLD_2(sge_ingress_rx_threshold);
+ s->counter_val[3] = G_THRESHOLD_3(sge_ingress_rx_threshold);
+ return 0;
+}