u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+ /* cached TP_OUT_CONFIG CRxPktEnc bit: when set, rx_pkt.err_vec
+ * carries a compressed error vector plus outer header info for
+ * encapsulated packets.
+ */
+ int rx_pkt_encap;
+
/*
* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
* subset of the set of fields which may be present in the Compressed
&adap->params.tp.ingress_config, 1,
A_TP_INGRESS_CONFIG);
+ /* For T6, cache whether the adapter passes a compressed error
+ * vector and outer header info for encapsulated packets.
+ */
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
+ v = t4_read_reg(adap, A_TP_OUT_CONFIG);
+ adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
+ }
+
/*
* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
* shift positions of several elements of the Compressed Filter Tuple
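
As a side note on the hunk above, here is a minimal standalone sketch (assuming the surrounding cxgbe types and helpers seen in this patch; the function name cache_rx_pkt_encap() is hypothetical) of why the cached flag is safe to branch on unconditionally in the RX path: tp params are zero-initialized, so rx_pkt_encap simply stays 0 on T4/T5.

static void cache_rx_pkt_encap(struct adapter *adap)
{
	/* CRxPktEnc exists only on T6 and later; on older chips
	 * rx_pkt_encap keeps its zero-initialized value.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		u32 v = t4_read_reg(adap, A_TP_OUT_CONFIG);

		adap->params.tp.rx_pkt_encap = (v & F_CRXPKTENC) ? 1 : 0;
	}
}
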
/*-
* BSD LICENSE
*
- * Copyright(c) 2014-2015 Chelsio Communications.
+ * Copyright(c) 2014-2017 Chelsio Communications.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#define V_RXF_IP6(x) ((x) << S_RXF_IP6)
#define F_RXF_IP6 V_RXF_IP6(1U)
+/* rx_pkt.err_vec fields */
+/* In T6, rx_pkt.err_vec carries either the full RxError error
+ * vector (16b), or, when CRxPktEnc is enabled in TP_OUT_CONFIG,
+ * the encapsulating header length (8b), outer encapsulation
+ * type (2b), and compressed error vector (6b).
+ */
+#define S_T6_COMPR_RXERR_VEC 0
+#define M_T6_COMPR_RXERR_VEC 0x3F
+#define V_T6_COMPR_RXERR_VEC(x) ((x) << S_T6_COMPR_RXERR_VEC)
+#define G_T6_COMPR_RXERR_VEC(x) \
+ (((x) >> S_T6_COMPR_RXERR_VEC) & M_T6_COMPR_RXERR_VEC)
+
/* cpl_fw*.type values */
enum {
FW_TYPE_RSSCPL = 4,
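
A minimal, self-contained sketch of the S_/M_/G_ bitfield convention introduced above (the macro definitions are copied from this patch; the test value is illustrative):

#include <stdio.h>
#include <stdint.h>

#define S_T6_COMPR_RXERR_VEC    0
#define M_T6_COMPR_RXERR_VEC    0x3F
#define G_T6_COMPR_RXERR_VEC(x) \
	(((x) >> S_T6_COMPR_RXERR_VEC) & M_T6_COMPR_RXERR_VEC)

int main(void)
{
	/* When CRxPktEnc is set, the upper 10 bits of err_vec carry
	 * outer-header info; only the low 6 bits are error bits.
	 */
	uint16_t err_vec = 0xa481;

	printf("compressed vector: 0x%02x\n",
	       G_T6_COMPR_RXERR_VEC(err_vec)); /* prints 0x01 */
	return 0;
}
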
#define F_UPCRST V_UPCRST(1U)
/* registers for module TP */
+#define A_TP_OUT_CONFIG 0x7d04
+
+#define S_CRXPKTENC 3
+#define V_CRXPKTENC(x) ((x) << S_CRXPKTENC)
+#define F_CRXPKTENC V_CRXPKTENC(1U)
+
#define TP_BASE_ADDR 0x7d00
#define A_TP_TIMER_RESOLUTION 0x7d90
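
And the matching S_/V_/F_ convention for single register bits, shown here for CRxPktEnc (constants copied from the defines above; the asserts are only a demonstration):

#include <assert.h>

#define S_CRXPKTENC	3
#define V_CRXPKTENC(x)	((x) << S_CRXPKTENC)
#define F_CRXPKTENC	V_CRXPKTENC(1U)

int main(void)
{
	/* F_CRXPKTENC selects bit 3 of TP_OUT_CONFIG (0x7d04). */
	assert(F_CRXPKTENC == 0x8);

	/* Typical use: test the bit in a register readback. */
	unsigned int v = 0x0c;	/* hypothetical register value */
	assert(v & F_CRXPKTENC);
	return 0;
}
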
const struct rss_header *rss_hdr;
bool csum_ok;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+ u16 err_vec;
rss_hdr = (const void *)rsp;
pkt = (const void *)&rsp[1];
- csum_ok = pkt->csum_calc && !pkt->err_vec;
+ /* Compressed error vector is enabled for T6 only */
+ if (q->adapter->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(ntohs(pkt->err_vec));
+ else
+ err_vec = ntohs(pkt->err_vec);
+ csum_ok = pkt->csum_calc && !err_vec;
mbuf = t4_pktgl_to_mbuf(si);
if (unlikely(!mbuf)) {
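
The second RX path below repeats the same decode; as a sketch, the logic in both hunks could be read as the following hypothetical helper (decode_err_vec() is not part of this patch; struct tp_params mirrors the params struct patched in the first hunk). Masking before the checksum test matters: with CRxPktEnc enabled, nonzero outer-header bits in err_vec would otherwise make valid packets fail the !err_vec check.

static inline u16 decode_err_vec(const struct tp_params *tp, __be16 raw)
{
	u16 err_vec = ntohs(raw);

	/* T6 with CRxPktEnc: only the low 6 bits are error bits. */
	return tp->rx_pkt_encap ? G_T6_COMPR_RXERR_VEC(err_vec) : err_vec;
}
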
(const void *)q->cur_desc;
const struct cpl_rx_pkt *cpl =
(const void *)&q->cur_desc[1];
- bool csum_ok = cpl->csum_calc && !cpl->err_vec;
struct rte_mbuf *pkt, *npkt;
u32 len, bufsz;
+ bool csum_ok;
+ u16 err_vec;
len = ntohl(rc->pldbuflen_qid);
BUG_ON(!(len & F_RSPD_NEWBUF));
len = G_RSPD_LEN(len);
pkt->pkt_len = len;
+ /* Compressed error vector is enabled for
+ * T6 only
+ */
+ if (q->adapter->params.tp.rx_pkt_encap)
+ err_vec = G_T6_COMPR_RXERR_VEC(
+ ntohs(cpl->err_vec));
+ else
+ err_vec = ntohs(cpl->err_vec);
+ csum_ok = cpl->csum_calc && !err_vec;
+
/* Chain mbufs into len if necessary */
while (len) {
struct rte_mbuf *new_pkt = rsd->buf;