net/cxgbe: support to offload flows to HASH region
author     Shagun Agrawal <shaguna@chelsio.com>
           Fri, 29 Jun 2018 18:12:19 +0000 (23:42 +0530)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Wed, 4 Jul 2018 20:20:41 +0000 (22:20 +0200)
Add an interface to offload flows to the HASH region. Translate the
internal filter specification into requests that offload flows to the
HASH region. Save the returned hash index of the offloaded flow for
deletion later.

Signed-off-by: Shagun Agrawal <shaguna@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
drivers/net/cxgbe/base/adapter.h
drivers/net/cxgbe/base/t4_msg.h
drivers/net/cxgbe/base/t4fw_interface.h
drivers/net/cxgbe/cxgbe_filter.c
drivers/net/cxgbe/cxgbe_filter.h
drivers/net/cxgbe/cxgbe_flow.c
drivers/net/cxgbe/cxgbe_main.c
drivers/net/cxgbe/cxgbe_ofld.h
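
Note on the overall flow (not part of the patch): cxgbe_set_filter() stays the
single entry point. When the filter specification has fs->cap set and the
adapter supports hash filters, the request is translated into a
CPL_ACT_OPEN_REQ/REQ6 and handed to the firmware; the assigned hash index comes
back asynchronously through hash_filter_rpl(), which fills the caller's
filter_ctx. A minimal caller sketch under those assumptions —
example_offload_to_hash_region() is a hypothetical helper, and
t4_init_completion()/cxgbe_poll_for_completion() plus the timeout values are
assumed from the existing driver code, not from this diff:

static int example_offload_to_hash_region(struct rte_eth_dev *dev,
                                          struct ch_filter_specification *fs)
{
        struct adapter *adap = ethdev2adap(dev);
        struct filter_ctx ctx;
        int err;

        fs->cap = 1;    /* place the flow in the HASH region, not LE-TCAM */

        t4_init_completion(&ctx.completion);           /* assumed helper */
        /* filter_id is not used on the hash path; returns before the HW reply */
        err = cxgbe_set_filter(dev, 0, fs, &ctx);
        if (err)
                return err;

        /* wait for hash_filter_rpl() to run on the FW event queue */
        err = cxgbe_poll_for_completion(&adap->sge.fw_evtq, 10, 2000,
                                        &ctx.completion);  /* assumed helper */
        if (err)
                return err;
        if (ctx.result)
                return ctx.result;

        /*
         * ctx.tid is the hash index; __cxgbe_flow_create() saves it as
         * flow->fidx so the flow can be located and deleted later.
         */
        return ctx.tid;
}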

diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h
index 3ed3252..e98dd21 100644
@@ -771,6 +771,17 @@ static inline void t4_complete(struct t4_completion *c)
        t4_os_unlock(&c->lock);
 }
 
+/**
+ * cxgbe_port_viid - get the VI id of a port
+ * @dev: the device for the port
+ *
+ * Return the VI id of the given port.
+ */
+static inline unsigned int cxgbe_port_viid(const struct rte_eth_dev *dev)
+{
+       return ethdev2pinfo(dev)->viid;
+}
+
 void *t4_alloc_mem(size_t size);
 void t4_free_mem(void *addr);
 #define t4_os_alloc(_size)     t4_alloc_mem((_size))
diff --git a/drivers/net/cxgbe/base/t4_msg.h b/drivers/net/cxgbe/base/t4_msg.h
index 43d1cb6..4112ff2 100644
@@ -7,7 +7,10 @@
 #define T4_MSG_H
 
 enum {
+       CPL_ACT_OPEN_REQ      = 0x3,
+       CPL_ACT_OPEN_RPL      = 0x25,
        CPL_SET_TCB_RPL       = 0x3A,
+       CPL_ACT_OPEN_REQ6     = 0x83,
        CPL_SGE_EGR_UPDATE    = 0xA5,
        CPL_FW4_MSG           = 0xC0,
        CPL_FW6_MSG           = 0xE0,
@@ -15,6 +18,15 @@ enum {
        CPL_TX_PKT_XT         = 0xEE,
 };
 
+enum CPL_error {
+       CPL_ERR_NONE               = 0,
+       CPL_ERR_TCAM_FULL          = 3,
+};
+
+enum {
+       ULP_MODE_NONE          = 0,
+};
+
 enum {                     /* TX_PKT_XT checksum types */
        TX_CSUM_TCPIP  = 8,
        TX_CSUM_UDPIP  = 9,
@@ -26,13 +38,24 @@ union opcode_tid {
        __u8 opcode;
 };
 
+#define S_CPL_OPCODE    24
+#define V_CPL_OPCODE(x) ((x) << S_CPL_OPCODE)
+
 #define G_TID(x)    ((x) & 0xFFFFFF)
 
+/* tid is assumed to be 24-bits */
+#define MK_OPCODE_TID(opcode, tid) (V_CPL_OPCODE(opcode) | (tid))
+
 #define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
 
 /* extract the TID from a CPL command */
 #define GET_TID(cmd) (G_TID(be32_to_cpu(OPCODE_TID(cmd))))
 
+/* partitioning of TID fields that also carry a queue id */
+#define S_TID_TID    0
+#define M_TID_TID    0x3fff
+#define G_TID_TID(x) (((x) >> S_TID_TID) & M_TID_TID)
+
 struct rss_header {
        __u8 opcode;
 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
@@ -79,6 +102,93 @@ struct work_request_hdr {
 #define V_COOKIE(x) ((x) << S_COOKIE)
 #define G_COOKIE(x) (((x) >> S_COOKIE) & M_COOKIE)
 
+/* option 0 fields */
+#define S_DELACK    5
+#define V_DELACK(x) ((x) << S_DELACK)
+
+#define S_NON_OFFLOAD    7
+#define V_NON_OFFLOAD(x) ((x) << S_NON_OFFLOAD)
+#define F_NON_OFFLOAD    V_NON_OFFLOAD(1U)
+
+#define S_ULP_MODE    8
+#define V_ULP_MODE(x) ((x) << S_ULP_MODE)
+
+#define S_SMAC_SEL    28
+#define V_SMAC_SEL(x) ((__u64)(x) << S_SMAC_SEL)
+
+#define S_TCAM_BYPASS    48
+#define V_TCAM_BYPASS(x) ((__u64)(x) << S_TCAM_BYPASS)
+#define F_TCAM_BYPASS    V_TCAM_BYPASS(1ULL)
+
+/* option 2 fields */
+#define S_RSS_QUEUE    0
+#define V_RSS_QUEUE(x) ((x) << S_RSS_QUEUE)
+
+#define S_RSS_QUEUE_VALID    10
+#define V_RSS_QUEUE_VALID(x) ((x) << S_RSS_QUEUE_VALID)
+#define F_RSS_QUEUE_VALID    V_RSS_QUEUE_VALID(1U)
+
+#define S_CONG_CNTRL    14
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+
+#define S_RX_CHANNEL    26
+#define V_RX_CHANNEL(x) ((x) << S_RX_CHANNEL)
+#define F_RX_CHANNEL    V_RX_CHANNEL(1U)
+
+#define S_T5_OPT_2_VALID    31
+#define V_T5_OPT_2_VALID(x) ((x) << S_T5_OPT_2_VALID)
+#define F_T5_OPT_2_VALID    V_T5_OPT_2_VALID(1U)
+
+struct cpl_t6_act_open_req {
+       WR_HDR;
+       union opcode_tid ot;
+       __be16 local_port;
+       __be16 peer_port;
+       __be32 local_ip;
+       __be32 peer_ip;
+       __be64 opt0;
+       __be32 rsvd;
+       __be32 opt2;
+       __be64 params;
+       __be32 rsvd2;
+       __be32 opt3;
+};
+
+struct cpl_t6_act_open_req6 {
+       WR_HDR;
+       union opcode_tid ot;
+       __be16 local_port;
+       __be16 peer_port;
+       __be64 local_ip_hi;
+       __be64 local_ip_lo;
+       __be64 peer_ip_hi;
+       __be64 peer_ip_lo;
+       __be64 opt0;
+       __be32 rsvd;
+       __be32 opt2;
+       __be64 params;
+       __be32 rsvd2;
+       __be32 opt3;
+};
+
+#define S_FILTER_TUPLE 24
+#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
+
+struct cpl_act_open_rpl {
+       RSS_HDR
+       union opcode_tid ot;
+       __be32 atid_status;
+};
+
+/* cpl_act_open_rpl.atid_status fields */
+#define S_AOPEN_STATUS    0
+#define M_AOPEN_STATUS    0xFF
+#define G_AOPEN_STATUS(x) (((x) >> S_AOPEN_STATUS) & M_AOPEN_STATUS)
+
+#define S_AOPEN_ATID    8
+#define M_AOPEN_ATID    0xFFFFFF
+#define G_AOPEN_ATID(x) (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID)
+
 struct cpl_set_tcb_rpl {
        RSS_HDR
        union opcode_tid ot;
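
The S_/M_/V_/G_ macros added here follow the Chelsio shift/mask convention:
V_X(v) shifts a value into field X, G_X(w) pulls it back out; the 14-bit
TID_TID field matches the << 14 used later to stash the event-queue id
alongside the atid. A small self-contained check (not part of the patch; the
abs_id/atid values are illustrative) of how the hash-filter request and reply
use these macros — cxgbe_set_hash_filter() packs (fw_evtq.abs_id << 14) | atid
into the opcode/tid word, and hash_filter_rpl() recovers the atid and status
from cpl_act_open_rpl.atid_status:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* definitions copied from the hunk above */
#define CPL_ACT_OPEN_REQ  0x3
#define CPL_ERR_NONE      0
#define S_CPL_OPCODE      24
#define V_CPL_OPCODE(x)   ((x) << S_CPL_OPCODE)
#define MK_OPCODE_TID(opcode, tid) (V_CPL_OPCODE(opcode) | (tid))
#define G_TID(x)          ((x) & 0xFFFFFF)
#define S_TID_TID         0
#define M_TID_TID         0x3fff
#define G_TID_TID(x)      (((x) >> S_TID_TID) & M_TID_TID)
#define S_AOPEN_STATUS    0
#define M_AOPEN_STATUS    0xFF
#define G_AOPEN_STATUS(x) (((x) >> S_AOPEN_STATUS) & M_AOPEN_STATUS)
#define S_AOPEN_ATID      8
#define M_AOPEN_ATID      0xFFFFFF
#define G_AOPEN_ATID(x)   (((x) >> S_AOPEN_ATID) & M_AOPEN_ATID)

int main(void)
{
        uint32_t abs_id = 5, atid = 42;         /* illustrative values */
        /* request side: the tid part of the opcode/tid word carries
         * (fw_evtq.abs_id << 14) | atid
         */
        uint32_t qid_filterid = (abs_id << 14) | atid;
        uint32_t ot = MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid);

        assert((ot >> S_CPL_OPCODE) == CPL_ACT_OPEN_REQ);
        assert(G_TID(ot) == qid_filterid);

        /* reply side: simulated atid_status word, with the request's tid
         * echoed in the atid field and CPL_ERR_NONE in the low byte
         */
        uint32_t atid_status = (qid_filterid << S_AOPEN_ATID) | CPL_ERR_NONE;

        assert(G_AOPEN_STATUS(atid_status) == CPL_ERR_NONE);
        assert(G_TID_TID(G_AOPEN_ATID(atid_status)) == atid); /* as in hash_filter_rpl() */
        printf("recovered atid %u\n",
               (unsigned int)G_TID_TID(G_AOPEN_ATID(atid_status)));
        return 0;
}
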
diff --git a/drivers/net/cxgbe/base/t4fw_interface.h b/drivers/net/cxgbe/base/t4fw_interface.h
index 2433bf2..19bcfc1 100644
@@ -55,6 +55,7 @@ enum fw_memtype {
 
 enum fw_wr_opcodes {
        FW_FILTER_WR            = 0x02,
+       FW_TP_WR                = 0x05,
        FW_ETH_TX_PKT_WR        = 0x08,
        FW_ETH_TX_PKTS_WR       = 0x09,
        FW_ETH_TX_PKT_VM_WR     = 0x11,
@@ -93,6 +94,11 @@ struct fw_wr_hdr {
 #define G_FW_WR_EQUEQ(x)       (((x) >> S_FW_WR_EQUEQ) & M_FW_WR_EQUEQ)
 #define F_FW_WR_EQUEQ          V_FW_WR_EQUEQ(1U)
 
+/* flow context identifier (lo)
+ */
+#define S_FW_WR_FLOWID         8
+#define V_FW_WR_FLOWID(x)      ((x) << S_FW_WR_FLOWID)
+
 /* length in units of 16-bytes (lo)
  */
 #define S_FW_WR_LEN16          0
diff --git a/drivers/net/cxgbe/cxgbe_filter.c b/drivers/net/cxgbe/cxgbe_filter.c
index bb2ebaa..bac7aa2 100644
@@ -2,7 +2,7 @@
  * Copyright(c) 2018 Chelsio Communications.
  * All rights reserved.
  */
-
+#include <rte_net.h>
 #include "common.h"
 #include "t4_regs.h"
 #include "cxgbe_filter.h"
@@ -159,6 +159,210 @@ int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
        return pos < size ? pos : -1;
 }
 
+/**
+ * Construct hash filter ntuple.
+ */
+static u64 hash_filter_ntuple(const struct filter_entry *f)
+{
+       struct adapter *adap = ethdev2adap(f->dev);
+       struct tp_params *tp = &adap->params.tp;
+       u64 ntuple = 0;
+       u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
+
+       if (tp->protocol_shift >= 0) {
+               if (!f->fs.val.proto)
+                       ntuple |= (u64)tcp_proto << tp->protocol_shift;
+               else
+                       ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
+       }
+
+       if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
+               ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
+
+       if (ntuple != tp->hash_filter_mask)
+               return 0;
+
+       return ntuple;
+}
+
+/**
+ * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
+ */
+static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
+                            unsigned int qid_filterid, struct adapter *adap)
+{
+       struct cpl_t6_act_open_req6 *req = NULL;
+       u64 local_lo, local_hi, peer_lo, peer_hi;
+       u32 *lip = (u32 *)f->fs.val.lip;
+       u32 *fip = (u32 *)f->fs.val.fip;
+
+       switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+       case CHELSIO_T6:
+               req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
+
+               INIT_TP_WR(req, 0);
+               break;
+       default:
+               dev_err(adap, "%s: unsupported chip type!\n", __func__);
+               return;
+       }
+
+       local_hi = ((u64)lip[1]) << 32 | lip[0];
+       local_lo = ((u64)lip[3]) << 32 | lip[2];
+       peer_hi = ((u64)fip[1]) << 32 | fip[0];
+       peer_lo = ((u64)fip[3]) << 32 | fip[2];
+
+       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+                                                   qid_filterid));
+       req->local_port = cpu_to_be16(f->fs.val.lport);
+       req->peer_port = cpu_to_be16(f->fs.val.fport);
+       req->local_ip_hi = local_hi;
+       req->local_ip_lo = local_lo;
+       req->peer_ip_hi = peer_hi;
+       req->peer_ip_lo = peer_lo;
+       req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+                               V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+                                          << 1) |
+                               V_ULP_MODE(ULP_MODE_NONE) |
+                               F_TCAM_BYPASS | F_NON_OFFLOAD);
+       req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+       req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+                           V_RSS_QUEUE(f->fs.iq) |
+                           F_T5_OPT_2_VALID |
+                           F_RX_CHANNEL |
+                           V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+                                        (f->fs.dirsteer << 1)));
+}
+
+/**
+ * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
+ */
+static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
+                           unsigned int qid_filterid, struct adapter *adap)
+{
+       struct cpl_t6_act_open_req *req = NULL;
+
+       switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
+       case CHELSIO_T6:
+               req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
+
+               INIT_TP_WR(req, 0);
+               break;
+       default:
+               dev_err(adap, "%s: unsupported chip type!\n", __func__);
+               return;
+       }
+
+       OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+                                                   qid_filterid));
+       req->local_port = cpu_to_be16(f->fs.val.lport);
+       req->peer_port = cpu_to_be16(f->fs.val.fport);
+       req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
+                       f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
+       req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
+                       f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
+       req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
+                               V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
+                                          << 1) |
+                               V_ULP_MODE(ULP_MODE_NONE) |
+                               F_TCAM_BYPASS | F_NON_OFFLOAD);
+       req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
+       req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
+                           V_RSS_QUEUE(f->fs.iq) |
+                           F_T5_OPT_2_VALID |
+                           F_RX_CHANNEL |
+                           V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
+                                        (f->fs.dirsteer << 1)));
+}
+
+/**
+ * Set the specified hash filter.
+ */
+static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
+                                struct ch_filter_specification *fs,
+                                struct filter_ctx *ctx)
+{
+       struct port_info *pi = ethdev2pinfo(dev);
+       struct adapter *adapter = pi->adapter;
+       struct tid_info *t = &adapter->tids;
+       struct filter_entry *f;
+       struct rte_mbuf *mbuf;
+       struct sge_ctrl_txq *ctrlq;
+       unsigned int iq;
+       int atid, size;
+       int ret = 0;
+
+       ret = validate_filter(adapter, fs);
+       if (ret)
+               return ret;
+
+       iq = get_filter_steerq(dev, fs);
+
+       ctrlq = &adapter->sge.ctrlq[pi->port_id];
+
+       f = t4_os_alloc(sizeof(*f));
+       if (!f)
+               goto out_err;
+
+       f->fs = *fs;
+       f->ctx = ctx;
+       f->dev = dev;
+       f->fs.iq = iq;
+
+       atid = cxgbe_alloc_atid(t, f);
+       if (atid < 0)
+               goto out_err;
+
+       if (f->fs.type) {
+               /* IPv6 hash filter */
+               f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
+               if (!f->clipt)
+                       goto free_atid;
+
+               size = sizeof(struct cpl_t6_act_open_req6);
+               mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+               if (!mbuf) {
+                       ret = -ENOMEM;
+                       goto free_clip;
+               }
+
+               mbuf->data_len = size;
+               mbuf->pkt_len = mbuf->data_len;
+
+               mk_act_open_req6(f, mbuf,
+                                ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+                                adapter);
+       } else {
+               /* IPv4 hash filter */
+               size = sizeof(struct cpl_t6_act_open_req);
+               mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
+               if (!mbuf) {
+                       ret = -ENOMEM;
+                       goto free_atid;
+               }
+
+               mbuf->data_len = size;
+               mbuf->pkt_len = mbuf->data_len;
+
+               mk_act_open_req(f, mbuf,
+                               ((adapter->sge.fw_evtq.abs_id << 14) | atid),
+                               adapter);
+       }
+
+       f->pending = 1;
+       t4_mgmt_tx(ctrlq, mbuf);
+       return 0;
+
+free_clip:
+       cxgbe_clip_release(f->dev, f->clipt);
+free_atid:
+       cxgbe_free_atid(t, atid);
+
+out_err:
+       t4_os_free(f);
+       return ret;
+}
+
 /**
  * Clear a filter and release any of its resources that we own.  This also
  * clears the filter's "pending" status.
@@ -425,6 +629,9 @@ int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
        uint8_t bitoff[16] = {0};
        int ret;
 
+       if (is_hashfilter(adapter) && fs->cap)
+               return cxgbe_set_hash_filter(dev, fs, ctx);
+
        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;
 
@@ -578,6 +785,62 @@ free_tid:
        return ret;
 }
 
+/**
+ * Handle a hash filter write reply.
+ */
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
+{
+       struct tid_info *t = &adap->tids;
+       struct filter_entry *f;
+       struct filter_ctx *ctx = NULL;
+       unsigned int tid = GET_TID(rpl);
+       unsigned int ftid = G_TID_TID(G_AOPEN_ATID
+                                     (be32_to_cpu(rpl->atid_status)));
+       unsigned int status  = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
+
+       f = lookup_atid(t, ftid);
+       if (!f) {
+               dev_warn(adap, "%s: could not find filter entry: %d\n",
+                        __func__, ftid);
+               return;
+       }
+
+       ctx = f->ctx;
+       f->ctx = NULL;
+
+       switch (status) {
+       case CPL_ERR_NONE: {
+               f->tid = tid;
+               f->pending = 0;  /* asynchronous setup completed */
+               f->valid = 1;
+
+               cxgbe_insert_tid(t, f, f->tid, 0);
+               cxgbe_free_atid(t, ftid);
+               if (ctx) {
+                       ctx->tid = f->tid;
+                       ctx->result = 0;
+               }
+               break;
+       }
+       default:
+               dev_warn(adap, "%s: filter creation failed with status = %u\n",
+                        __func__, status);
+
+               if (ctx) {
+                       if (status == CPL_ERR_TCAM_FULL)
+                               ctx->result = -EAGAIN;
+                       else
+                               ctx->result = -EINVAL;
+               }
+
+               cxgbe_free_atid(t, ftid);
+               t4_os_free(f);
+       }
+
+       if (ctx)
+               t4_complete(&ctx->completion);
+}
+
 /**
  * Handle a LE-TCAM filter write/deletion reply.
  */
diff --git a/drivers/net/cxgbe/cxgbe_filter.h b/drivers/net/cxgbe/cxgbe_filter.h
index ce115f6..7c469c8 100644
@@ -223,6 +223,7 @@ int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct filter_ctx *ctx);
 int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family);
 int init_hash_filter(struct adapter *adap);
+void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl);
 int validate_filter(struct adapter *adap, struct ch_filter_specification *fs);
 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
                           u64 *c, bool get_byte);
diff --git a/drivers/net/cxgbe/cxgbe_flow.c b/drivers/net/cxgbe/cxgbe_flow.c
index dfb5fac..4950cb4 100644
@@ -452,6 +452,7 @@ static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
 {
        struct ch_filter_specification *fs = &flow->fs;
        struct adapter *adap = ethdev2adap(dev);
+       struct tid_info *t = &adap->tids;
        struct filter_ctx ctx;
        unsigned int fidx;
        int err;
@@ -484,8 +485,13 @@ static int __cxgbe_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow)
                return ctx.result;
        }
 
-       flow->fidx = fidx;
-       flow->f = &adap->tids.ftid_tab[fidx];
+       if (fs->cap) { /* to destroy the filter */
+               flow->fidx = ctx.tid;
+               flow->f = lookup_tid(t, ctx.tid);
+       } else {
+               flow->fidx = fidx;
+               flow->f = &adap->tids.ftid_tab[fidx];
+       }
 
        return 0;
 }
diff --git a/drivers/net/cxgbe/cxgbe_main.c b/drivers/net/cxgbe/cxgbe_main.c
index 2050fe4..c550dd5 100644
@@ -91,6 +91,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
                const struct cpl_set_tcb_rpl *p = (const void *)rsp;
 
                filter_rpl(q->adapter, p);
+       } else if (opcode == CPL_ACT_OPEN_RPL) {
+               const struct cpl_act_open_rpl *p = (const void *)rsp;
+
+               hash_filter_rpl(q->adapter, p);
        } else {
                dev_err(adapter, "unexpected CPL %#x on FW event queue\n",
                        opcode);
@@ -263,6 +267,58 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
        return 0;
 }
 
+/**
+ * Allocate an active-open TID and set it to the supplied value.
+ */
+int cxgbe_alloc_atid(struct tid_info *t, void *data)
+{
+       int atid = -1;
+
+       t4_os_lock(&t->atid_lock);
+       if (t->afree) {
+               union aopen_entry *p = t->afree;
+
+               atid = p - t->atid_tab;
+               t->afree = p->next;
+               p->data = data;
+               t->atids_in_use++;
+       }
+       t4_os_unlock(&t->atid_lock);
+       return atid;
+}
+
+/**
+ * Release an active-open TID.
+ */
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid)
+{
+       union aopen_entry *p = &t->atid_tab[atid];
+
+       t4_os_lock(&t->atid_lock);
+       p->next = t->afree;
+       t->afree = p;
+       t->atids_in_use--;
+       t4_os_unlock(&t->atid_lock);
+}
+
+/**
+ * Insert a TID.
+ */
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+                     unsigned short family)
+{
+       t->tid_tab[tid] = data;
+       if (t->hash_base && tid >= t->hash_base) {
+               if (family == FILTER_TYPE_IPV4)
+                       rte_atomic32_inc(&t->hash_tids_in_use);
+       } else {
+               if (family == FILTER_TYPE_IPV4)
+                       rte_atomic32_inc(&t->tids_in_use);
+       }
+
+       rte_atomic32_inc(&t->conns_in_use);
+}
+
 /**
  * Free TID tables.
  */
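
cxgbe_alloc_atid()/cxgbe_free_atid() above manage active-open TIDs as an
intrusive free list threaded through the atid table itself: a free slot's
union member points at the next free slot, an allocated slot holds the
caller's data pointer, and the TID is simply the slot index (p - t->atid_tab).
The self-contained toy below reproduces that pattern (toy_* names are
illustrative, locking is omitted, and the free-list initialization is not
shown in this hunk, so it is assumed here):

#include <assert.h>
#include <stdio.h>

#define NUM_ATIDS 8

union toy_aopen_entry {
        void *data;                     /* in use: caller's cookie */
        union toy_aopen_entry *next;    /* free: next free slot */
};

struct toy_tid_info {
        union toy_aopen_entry atid_tab[NUM_ATIDS];
        union toy_aopen_entry *afree;   /* head of the free list */
};

static void toy_init(struct toy_tid_info *t)
{
        int i;

        for (i = 0; i < NUM_ATIDS - 1; i++)
                t->atid_tab[i].next = &t->atid_tab[i + 1];
        t->atid_tab[NUM_ATIDS - 1].next = NULL;
        t->afree = &t->atid_tab[0];
}

static int toy_alloc_atid(struct toy_tid_info *t, void *data)
{
        union toy_aopen_entry *p = t->afree;

        if (!p)
                return -1;
        t->afree = p->next;
        p->data = data;
        return p - t->atid_tab;         /* the atid is the slot index */
}

static void toy_free_atid(struct toy_tid_info *t, int atid)
{
        union toy_aopen_entry *p = &t->atid_tab[atid];

        p->next = t->afree;
        t->afree = p;
}

int main(void)
{
        struct toy_tid_info t;
        int filter = 42;
        int atid;

        toy_init(&t);
        atid = toy_alloc_atid(&t, &filter);
        assert(atid == 0 && t.atid_tab[atid].data == &filter);
        toy_free_atid(&t, atid);        /* slot goes back to the free list */
        assert(t.afree == &t.atid_tab[0]);
        printf("atid %d allocated and released\n", atid);
        return 0;
}
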
diff --git a/drivers/net/cxgbe/cxgbe_ofld.h b/drivers/net/cxgbe/cxgbe_ofld.h
index e97c424..798e398 100644
 
 #include "cxgbe_filter.h"
 
+#define INIT_TP_WR(w, tid) do { \
+       (w)->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_TP_WR) | \
+                               V_FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
+       (w)->wr.wr_mid = cpu_to_be32( \
+                               V_FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
+                               V_FW_WR_FLOWID(tid)); \
+       (w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
 /*
  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
  */
@@ -46,4 +55,20 @@ struct tid_info {
        rte_spinlock_t atid_lock __rte_cache_aligned;
        rte_spinlock_t ftid_lock;
 };
+
+static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
+{
+       return tid < t->ntids ? t->tid_tab[tid] : NULL;
+}
+
+static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
+{
+       return atid < t->natids ? t->atid_tab[atid].data : NULL;
+}
+
+int cxgbe_alloc_atid(struct tid_info *t, void *data);
+void cxgbe_free_atid(struct tid_info *t, unsigned int atid);
+void cxgbe_insert_tid(struct tid_info *t, void *data, unsigned int tid,
+                     unsigned short family);
+
 #endif /* _CXGBE_OFLD_H_ */
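
INIT_TP_WR above builds the common firmware work-request header used by
mk_act_open_req()/mk_act_open_req6(): FW_WR_IMMDLEN carries the number of
immediate-data bytes that follow the embedded work_request_hdr, FW_WR_LEN16
carries the whole message length in 16-byte units, and FW_WR_FLOWID carries
the tid (0 here, since no flow exists yet). A self-contained check of the two
length computations using toy stand-in structs (sizes are illustrative; the
real layouts are work_request_hdr and cpl_t6_act_open_req above):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* toy stand-ins only; the driver uses work_request_hdr/cpl_t6_act_open_req */
struct toy_wr_hdr {
        uint32_t wr_hi;
        uint32_t wr_mid;
        uint64_t wr_lo;
};

struct toy_act_open_req {
        struct toy_wr_hdr wr;
        uint32_t ot;
        uint16_t local_port;
        uint16_t peer_port;
        uint32_t local_ip;
        uint32_t peer_ip;
        uint64_t opt0;
        uint32_t rsvd;
        uint32_t opt2;
        uint64_t params;
        uint32_t rsvd2;
        uint32_t opt3;
};

int main(void)
{
        /* the two values INIT_TP_WR() encodes with V_FW_WR_IMMDLEN() and
         * V_FW_WR_LEN16() for this work request
         */
        size_t immdlen = sizeof(struct toy_act_open_req) -
                         sizeof(struct toy_wr_hdr);
        size_t len16 = DIV_ROUND_UP(sizeof(struct toy_act_open_req), 16);

        printf("immediate data: %zu bytes, LEN16: %zu\n", immdlen, len16);
        assert(len16 * 16 >= sizeof(struct toy_act_open_req));
        return 0;
}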