drivers/net/cxgbe: Chelsio T5/T6 filter support (cxgbe_filter.c)
[dpdk.git] / drivers / net / cxgbe / cxgbe_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 #include <rte_net.h>
6 #include "common.h"
7 #include "t4_tcb.h"
8 #include "t4_regs.h"
9 #include "cxgbe_filter.h"
10 #include "clip_tbl.h"
11 #include "l2t.h"
12
/**
 * init_hash_filter - enable hash (TCAM-bypass) filter support
 * @adap: the adapter
 *
 * Queries firmware for the number of hash-filter TIDs (NTID), derives the
 * active-TID table size from it, and marks hash filtering as available in
 * the adapter parameters.
 *
 * Return: 0 on success, the negative error from t4_query_params() on
 * firmware query failure.
 */
int init_hash_filter(struct adapter *adap)
{
	unsigned int n_user_filters;
	unsigned int user_filter_perc;
	int ret;
	u32 params[7], val[7];	/* room for up to 7 FW params; only 1 used here */

/*
 * NOTE(review): these macros stay defined for the rest of the file (no
 * #undef); FW_PARAM_PFVF is unused in this function but may be relied on
 * by later code -- confirm before removing.
 */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) |  \
	V_FW_PARAMS_PARAM_Y(0) | \
	V_FW_PARAMS_PARAM_Z(0))

	/* Ask firmware how many hash-filter TIDs exist. */
	params[0] = FW_PARAM_DEV(NTID);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      params, val);
	if (ret < 0)
		return ret;
	adap->tids.ntids = val[0];
	/* Half the TIDs, capped at MAX_ATIDS, are usable as active TIDs. */
	adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

	/* 100% of the filter region is handed to user filters. */
	user_filter_perc = 100;
	n_user_filters = mult_frac(adap->tids.nftids,
				   user_filter_perc,
				   100);

	adap->tids.nftids = n_user_filters;
	adap->params.hash_filter = 1;
	return 0;
}
50
51 /**
52  * Validate if the requested filter specification can be set by checking
53  * if the requested features have been enabled
54  */
55 int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
56 {
57         u32 fconf;
58
59         /*
60          * Check for unconfigured fields being used.
61          */
62         fconf = adapter->params.tp.vlan_pri_map;
63
64 #define S(_field) \
65         (fs->val._field || fs->mask._field)
66 #define U(_mask, _field) \
67         (!(fconf & (_mask)) && S(_field))
68
69         if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
70             U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx))
71                 return -EOPNOTSUPP;
72
73 #undef S
74 #undef U
75
76         /*
77          * If the user is requesting that the filter action loop
78          * matching packets back out one of our ports, make sure that
79          * the egress port is in range.
80          */
81         if (fs->action == FILTER_SWITCH &&
82             fs->eport >= adapter->params.nports)
83                 return -ERANGE;
84
85         /*
86          * Don't allow various trivially obvious bogus out-of-range
87          * values ...
88          */
89         if (fs->val.iport >= adapter->params.nports)
90                 return -ERANGE;
91
92         return 0;
93 }
94
95 /**
96  * Get the queue to which the traffic must be steered to.
97  */
98 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
99                                       struct ch_filter_specification *fs)
100 {
101         struct port_info *pi = ethdev2pinfo(dev);
102         struct adapter *adapter = pi->adapter;
103         unsigned int iq;
104
105         /*
106          * If the user has requested steering matching Ingress Packets
107          * to a specific Queue Set, we need to make sure it's in range
108          * for the port and map that into the Absolute Queue ID of the
109          * Queue Set's Response Queue.
110          */
111         if (!fs->dirsteer) {
112                 iq = 0;
113         } else {
114                 /*
115                  * If the iq id is greater than the number of qsets,
116                  * then assume it is an absolute qid.
117                  */
118                 if (fs->iq < pi->n_rx_qsets)
119                         iq = adapter->sge.ethrxq[pi->first_qset +
120                                                  fs->iq].rspq.abs_id;
121                 else
122                         iq = fs->iq;
123         }
124
125         return iq;
126 }
127
128 /* Return an error number if the indicated filter isn't writable ... */
129 int writable_filter(struct filter_entry *f)
130 {
131         if (f->locked)
132                 return -EPERM;
133         if (f->pending)
134                 return -EBUSY;
135
136         return 0;
137 }
138
139 /**
140  * Send CPL_SET_TCB_FIELD message
141  */
142 static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
143                           u16 word, u64 mask, u64 val, int no_reply)
144 {
145         struct rte_mbuf *mbuf;
146         struct cpl_set_tcb_field *req;
147         struct sge_ctrl_txq *ctrlq;
148
149         ctrlq = &adapter->sge.ctrlq[0];
150         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
151         WARN_ON(!mbuf);
152
153         mbuf->data_len = sizeof(*req);
154         mbuf->pkt_len = mbuf->data_len;
155
156         req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
157         memset(req, 0, sizeof(*req));
158         INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
159         req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
160                                       V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
161                                       V_NO_REPLY(no_reply));
162         req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
163         req->mask = cpu_to_be64(mask);
164         req->val = cpu_to_be64(val);
165
166         t4_mgmt_tx(ctrlq, mbuf);
167 }
168
169 /**
170  * Set one of the t_flags bits in the TCB.
171  */
172 static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
173                           unsigned int bit_pos, unsigned int val, int no_reply)
174 {
175         set_tcb_field(adap, ftid,  W_TCB_T_FLAGS, 1ULL << bit_pos,
176                       (unsigned long long)val << bit_pos, no_reply);
177 }
178
/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 *
 * Layout written into the buffer at @req:
 *   ulp_txpkt | ulptx_idata (IMM) | cpl_set_tcb_field | ulptx_idata (NOOP)
 * The trailing NOOP pads the immediate data; the pointer arithmetic below
 * depends on these structures being laid out back-to-back, so the write
 * order must not be changed.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
					struct cpl_set_tcb_field *req,
					unsigned int word,
					u64 mask, u64 val, u8 cookie,
					int no_reply)
{
	/* The ULP_TX_PKT header and the immediate-data sub-command sit
	 * directly in front of the CPL in the same buffer.
	 */
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* Length is expressed in 16-byte units. */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
				      V_QUEUENO(0));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	/* Terminate the immediate data with an alignment NOOP. */
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
206
207 /**
208  * Check if entry already filled.
209  */
210 bool is_filter_set(struct tid_info *t, int fidx, int family)
211 {
212         bool result = FALSE;
213         int i, max;
214
215         /* IPv6 requires four slots and IPv4 requires only 1 slot.
216          * Ensure, there's enough slots available.
217          */
218         max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
219
220         t4_os_lock(&t->ftid_lock);
221         for (i = fidx; i <= max; i++) {
222                 if (rte_bitmap_get(t->ftid_bmap, i)) {
223                         result = TRUE;
224                         break;
225                 }
226         }
227         t4_os_unlock(&t->ftid_lock);
228         return result;
229 }
230
231 /**
232  * Allocate a available free entry
233  */
234 int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
235 {
236         struct tid_info *t = &adap->tids;
237         int pos;
238         int size = t->nftids;
239
240         t4_os_lock(&t->ftid_lock);
241         if (family == FILTER_TYPE_IPV6)
242                 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
243         else
244                 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
245         t4_os_unlock(&t->ftid_lock);
246
247         return pos < size ? pos : -1;
248 }
249
250 /**
251  * Construct hash filter ntuple.
252  */
253 static u64 hash_filter_ntuple(const struct filter_entry *f)
254 {
255         struct adapter *adap = ethdev2adap(f->dev);
256         struct tp_params *tp = &adap->params.tp;
257         u64 ntuple = 0;
258         u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
259
260         if (tp->port_shift >= 0)
261                 ntuple |= (u64)f->fs.mask.iport << tp->port_shift;
262
263         if (tp->protocol_shift >= 0) {
264                 if (!f->fs.val.proto)
265                         ntuple |= (u64)tcp_proto << tp->protocol_shift;
266                 else
267                         ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
268         }
269
270         if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
271                 ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
272         if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
273                 ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
274
275         if (ntuple != tp->hash_filter_mask)
276                 return 0;
277
278         return ntuple;
279 }
280
/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 *
 * Layout written at @abort_req:
 *   ulp_txpkt | ulptx_idata (IMM) | cpl_abort_req | ulptx_idata (NOOP)
 * The pointer arithmetic relies on these structures being back-to-back,
 * so the write order must be preserved.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* Length is expressed in 16-byte units. */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_req) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	abort_req->rsvd0 = cpu_to_be32(0);
	abort_req->rsvd1 = 0;
	/* Abort without sending a RST to the peer. */
	abort_req->cmd = CPL_ABORT_NO_RST;
	/* Terminate the immediate data with an alignment NOOP. */
	sc = (struct ulptx_idata *)(abort_req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
304
/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 *
 * Mirrors mk_abort_req_ulp() for the reply half of the abort handshake;
 * same back-to-back structure layout and ordering constraints apply.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* Length is expressed in 16-byte units. */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_rpl) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = cpu_to_be32(0);
	abort_rpl->rsvd1 = 0;
	/* Reply without sending a RST to the peer. */
	abort_rpl->cmd = CPL_ABORT_NO_RST;
	/* Terminate the immediate data with an alignment NOOP. */
	sc = (struct ulptx_idata *)(abort_rpl + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
328
329 /**
330  * Delete the specified hash filter.
331  */
332 static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
333                                  unsigned int filter_id,
334                                  struct filter_ctx *ctx)
335 {
336         struct adapter *adapter = ethdev2adap(dev);
337         struct tid_info *t = &adapter->tids;
338         struct filter_entry *f;
339         struct sge_ctrl_txq *ctrlq;
340         unsigned int port_id = ethdev2pinfo(dev)->port_id;
341         int ret;
342
343         if (filter_id > adapter->tids.ntids)
344                 return -E2BIG;
345
346         f = lookup_tid(t, filter_id);
347         if (!f) {
348                 dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
349                         __func__, filter_id);
350                 return -EINVAL;
351         }
352
353         ret = writable_filter(f);
354         if (ret)
355                 return ret;
356
357         if (f->valid) {
358                 unsigned int wrlen;
359                 struct rte_mbuf *mbuf;
360                 struct work_request_hdr *wr;
361                 struct ulptx_idata *aligner;
362                 struct cpl_set_tcb_field *req;
363                 struct cpl_abort_req *abort_req;
364                 struct cpl_abort_rpl *abort_rpl;
365
366                 f->ctx = ctx;
367                 f->pending = 1;
368
369                 wrlen = cxgbe_roundup(sizeof(*wr) +
370                                       (sizeof(*req) + sizeof(*aligner)) +
371                                       sizeof(*abort_req) + sizeof(*abort_rpl),
372                                       16);
373
374                 ctrlq = &adapter->sge.ctrlq[port_id];
375                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
376                 if (!mbuf) {
377                         dev_err(adapter, "%s: could not allocate skb ..\n",
378                                 __func__);
379                         goto out_err;
380                 }
381
382                 mbuf->data_len = wrlen;
383                 mbuf->pkt_len = mbuf->data_len;
384
385                 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
386                 INIT_ULPTX_WR(req, wrlen, 0, 0);
387                 wr = (struct work_request_hdr *)req;
388                 wr++;
389                 req = (struct cpl_set_tcb_field *)wr;
390                 mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
391                                 V_TCB_RSS_INFO(M_TCB_RSS_INFO),
392                                 V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
393                                 0, 1);
394                 aligner = (struct ulptx_idata *)(req + 1);
395                 abort_req = (struct cpl_abort_req *)(aligner + 1);
396                 mk_abort_req_ulp(abort_req, f->tid);
397                 abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
398                 mk_abort_rpl_ulp(abort_rpl, f->tid);
399                 t4_mgmt_tx(ctrlq, mbuf);
400         }
401         return 0;
402
403 out_err:
404         return -ENOMEM;
405 }
406
/**
 * Build a ACT_OPEN_REQ6 message for setting IPv6 hash filter.
 *
 * Only the T6 request layout is supported; on other chips an error is
 * logged and the mbuf payload is left unwritten.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *req = NULL;
	u64 local_lo, local_hi, peer_lo, peer_hi;
	/* lip/fip are 16-byte IPv6 addresses viewed as four 32-bit words. */
	u32 *lip = (u32 *)f->fs.val.lip;
	u32 *fip = (u32 *)f->fs.val.fip;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

	/*
	 * Pack the address words into 64-bit halves.  No byte swap is done
	 * here or when storing into the request below; presumably lip/fip
	 * already hold the addresses in network byte order so the words can
	 * be written as-is -- TODO confirm against the T6 CPL spec.
	 */
	local_hi = ((u64)lip[1]) << 32 | lip[0];
	local_lo = ((u64)lip[3]) << 32 | lip[2];
	peer_hi = ((u64)fip[1]) << 32 | fip[0];
	peer_lo = ((u64)fip[3]) << 32 | fip[2];

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = local_hi;
	req->local_ip_lo = local_lo;
	req->peer_ip_hi = peer_hi;
	req->peer_ip_lo = peer_lo;
	/* opt0: VLAN rewrite, hit counters, L2T index, egress channel. */
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	/* opt2: RSS queue plus drop/steer/switch action encoding. */
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
			    V_RSS_QUEUE(f->fs.iq) |
			    F_T5_OPT_2_VALID |
			    F_RX_CHANNEL |
			    V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					 (f->fs.dirsteer << 1)) |
			    V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
460
461 /**
462  * Build a ACT_OPEN_REQ message for setting IPv4 hash filter.
463  */
464 static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
465                             unsigned int qid_filterid, struct adapter *adap)
466 {
467         struct cpl_t6_act_open_req *req = NULL;
468
469         switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
470         case CHELSIO_T6:
471                 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
472
473                 INIT_TP_WR(req, 0);
474                 break;
475         default:
476                 dev_err(adap, "%s: unsupported chip type!\n", __func__);
477                 return;
478         }
479
480         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
481                                                     qid_filterid));
482         req->local_port = cpu_to_be16(f->fs.val.lport);
483         req->peer_port = cpu_to_be16(f->fs.val.fport);
484         req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
485                         f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
486         req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
487                         f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
488         req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
489                                         f->fs.newvlan == VLAN_REWRITE) |
490                                 V_DELACK(f->fs.hitcnts) |
491                                 V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
492                                 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
493                                            << 1) |
494                                 V_TX_CHAN(f->fs.eport) |
495                                 V_ULP_MODE(ULP_MODE_NONE) |
496                                 F_TCAM_BYPASS | F_NON_OFFLOAD);
497         req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
498         req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
499                             V_RSS_QUEUE(f->fs.iq) |
500                             F_T5_OPT_2_VALID |
501                             F_RX_CHANNEL |
502                             V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
503                                          (f->fs.dirsteer << 1)) |
504                             V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
505 }
506
507 /**
508  * Set the specified hash filter.
509  */
510 static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
511                                  struct ch_filter_specification *fs,
512                                  struct filter_ctx *ctx)
513 {
514         struct port_info *pi = ethdev2pinfo(dev);
515         struct adapter *adapter = pi->adapter;
516         struct tid_info *t = &adapter->tids;
517         struct filter_entry *f;
518         struct rte_mbuf *mbuf;
519         struct sge_ctrl_txq *ctrlq;
520         unsigned int iq;
521         int atid, size;
522         int ret = 0;
523
524         ret = validate_filter(adapter, fs);
525         if (ret)
526                 return ret;
527
528         iq = get_filter_steerq(dev, fs);
529
530         ctrlq = &adapter->sge.ctrlq[pi->port_id];
531
532         f = t4_os_alloc(sizeof(*f));
533         if (!f)
534                 goto out_err;
535
536         f->fs = *fs;
537         f->ctx = ctx;
538         f->dev = dev;
539         f->fs.iq = iq;
540
541         /*
542          * If the new filter requires loopback Destination MAC and/or VLAN
543          * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
544          * the filter.
545          */
546         if (f->fs.newvlan == VLAN_INSERT ||
547             f->fs.newvlan == VLAN_REWRITE) {
548                 /* allocate L2T entry for new filter */
549                 f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
550                                                    f->fs.eport, f->fs.dmac);
551                 if (!f->l2t) {
552                         ret = -ENOMEM;
553                         goto out_err;
554                 }
555         }
556
557         atid = cxgbe_alloc_atid(t, f);
558         if (atid < 0)
559                 goto out_err;
560
561         if (f->fs.type) {
562                 /* IPv6 hash filter */
563                 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
564                 if (!f->clipt)
565                         goto free_atid;
566
567                 size = sizeof(struct cpl_t6_act_open_req6);
568                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
569                 if (!mbuf) {
570                         ret = -ENOMEM;
571                         goto free_clip;
572                 }
573
574                 mbuf->data_len = size;
575                 mbuf->pkt_len = mbuf->data_len;
576
577                 mk_act_open_req6(f, mbuf,
578                                  ((adapter->sge.fw_evtq.abs_id << 14) | atid),
579                                  adapter);
580         } else {
581                 /* IPv4 hash filter */
582                 size = sizeof(struct cpl_t6_act_open_req);
583                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
584                 if (!mbuf) {
585                         ret = -ENOMEM;
586                         goto free_atid;
587                 }
588
589                 mbuf->data_len = size;
590                 mbuf->pkt_len = mbuf->data_len;
591
592                 mk_act_open_req(f, mbuf,
593                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
594                                 adapter);
595         }
596
597         f->pending = 1;
598         t4_mgmt_tx(ctrlq, mbuf);
599         return 0;
600
601 free_clip:
602         cxgbe_clip_release(f->dev, f->clipt);
603 free_atid:
604         cxgbe_free_atid(t, atid);
605
606 out_err:
607         t4_os_free(f);
608         return ret;
609 }
610
/**
 * Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
void clear_filter(struct filter_entry *f)
{
	/* Release the CLIP entry (taken for IPv6 filters) before the
	 * memset below wipes the f->clipt pointer.
	 */
	if (f->clipt)
		cxgbe_clip_release(f->dev, f->clipt);

	/*
	 * The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags etc. so it's all we need for
	 * this operation.
	 * NOTE(review): an L2T entry referenced by f->l2t is not released
	 * here -- confirm its lifetime is handled by the caller.
	 */
	memset(f, 0, sizeof(*f));
}
627
628 /**
629  * t4_mk_filtdelwr - create a delete filter WR
630  * @ftid: the filter ID
631  * @wr: the filter work request to populate
632  * @qid: ingress queue to receive the delete notification
633  *
634  * Creates a filter work request to delete the supplied filter.  If @qid is
635  * negative the delete notification is suppressed.
636  */
637 static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
638 {
639         memset(wr, 0, sizeof(*wr));
640         wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
641         wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
642         wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
643                                     V_FW_FILTER_WR_NOREPLY(qid < 0));
644         wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
645         if (qid >= 0)
646                 wr->rx_chan_rx_rpl_iq =
647                                 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
648 }
649
650 /**
651  * Create FW work request to delete the filter at a specified index
652  */
653 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
654 {
655         struct adapter *adapter = ethdev2adap(dev);
656         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
657         struct rte_mbuf *mbuf;
658         struct fw_filter_wr *fwr;
659         struct sge_ctrl_txq *ctrlq;
660         unsigned int port_id = ethdev2pinfo(dev)->port_id;
661
662         ctrlq = &adapter->sge.ctrlq[port_id];
663         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
664         if (!mbuf)
665                 return -ENOMEM;
666
667         mbuf->data_len = sizeof(*fwr);
668         mbuf->pkt_len = mbuf->data_len;
669
670         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
671         t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
672
673         /*
674          * Mark the filter as "pending" and ship off the Filter Work Request.
675          * When we get the Work Request Reply we'll clear the pending status.
676          */
677         f->pending = 1;
678         t4_mgmt_tx(ctrlq, mbuf);
679         return 0;
680 }
681
682 int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
683 {
684         struct adapter *adapter = ethdev2adap(dev);
685         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
686         struct rte_mbuf *mbuf;
687         struct fw_filter_wr *fwr;
688         struct sge_ctrl_txq *ctrlq;
689         unsigned int port_id = ethdev2pinfo(dev)->port_id;
690         int ret;
691
692         /*
693          * If the new filter requires loopback Destination MAC and/or VLAN
694          * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
695          * the filter.
696          */
697         if (f->fs.newvlan) {
698                 /* allocate L2T entry for new filter */
699                 f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
700                                                    f->fs.eport, f->fs.dmac);
701                 if (!f->l2t)
702                         return -ENOMEM;
703         }
704
705         ctrlq = &adapter->sge.ctrlq[port_id];
706         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
707         if (!mbuf) {
708                 ret = -ENOMEM;
709                 goto out;
710         }
711
712         mbuf->data_len = sizeof(*fwr);
713         mbuf->pkt_len = mbuf->data_len;
714
715         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
716         memset(fwr, 0, sizeof(*fwr));
717
718         /*
719          * Construct the work request to set the filter.
720          */
721         fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
722         fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
723         fwr->tid_to_iq =
724                 cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
725                             V_FW_FILTER_WR_RQTYPE(f->fs.type) |
726                             V_FW_FILTER_WR_NOREPLY(0) |
727                             V_FW_FILTER_WR_IQ(f->fs.iq));
728         fwr->del_filter_to_l2tix =
729                 cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
730                             V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
731                             V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
732                             V_FW_FILTER_WR_INSVLAN
733                                 (f->fs.newvlan == VLAN_INSERT ||
734                                  f->fs.newvlan == VLAN_REWRITE) |
735                             V_FW_FILTER_WR_RMVLAN
736                                 (f->fs.newvlan == VLAN_REMOVE ||
737                                  f->fs.newvlan == VLAN_REWRITE) |
738                             V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
739                             V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
740                             V_FW_FILTER_WR_PRIO(f->fs.prio) |
741                             V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
742         fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
743         fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
744         fwr->smac_sel = 0;
745         fwr->rx_chan_rx_rpl_iq =
746                 cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
747                             V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
748                                                      ));
749         fwr->maci_to_matchtypem =
750                 cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
751                             V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
752                             V_FW_FILTER_WR_PORT(f->fs.val.iport) |
753                             V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
754         fwr->ptcl = f->fs.val.proto;
755         fwr->ptclm = f->fs.mask.proto;
756         rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
757         rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
758         rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
759         rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
760         fwr->lp = cpu_to_be16(f->fs.val.lport);
761         fwr->lpm = cpu_to_be16(f->fs.mask.lport);
762         fwr->fp = cpu_to_be16(f->fs.val.fport);
763         fwr->fpm = cpu_to_be16(f->fs.mask.fport);
764
765         /*
766          * Mark the filter as "pending" and ship off the Filter Work Request.
767          * When we get the Work Request Reply we'll clear the pending status.
768          */
769         f->pending = 1;
770         t4_mgmt_tx(ctrlq, mbuf);
771         return 0;
772
773 out:
774         return ret;
775 }
776
777 /**
778  * Set the corresponding entry in the bitmap. 4 slots are
779  * marked for IPv6, whereas only 1 slot is marked for IPv4.
780  */
781 static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
782 {
783         t4_os_lock(&t->ftid_lock);
784         if (rte_bitmap_get(t->ftid_bmap, fidx)) {
785                 t4_os_unlock(&t->ftid_lock);
786                 return -EBUSY;
787         }
788
789         if (family == FILTER_TYPE_IPV4) {
790                 rte_bitmap_set(t->ftid_bmap, fidx);
791         } else {
792                 rte_bitmap_set(t->ftid_bmap, fidx);
793                 rte_bitmap_set(t->ftid_bmap, fidx + 1);
794                 rte_bitmap_set(t->ftid_bmap, fidx + 2);
795                 rte_bitmap_set(t->ftid_bmap, fidx + 3);
796         }
797         t4_os_unlock(&t->ftid_lock);
798         return 0;
799 }
800
801 /**
802  * Clear the corresponding entry in the bitmap. 4 slots are
803  * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
804  */
805 static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
806 {
807         t4_os_lock(&t->ftid_lock);
808         if (family == FILTER_TYPE_IPV4) {
809                 rte_bitmap_clear(t->ftid_bmap, fidx);
810         } else {
811                 rte_bitmap_clear(t->ftid_bmap, fidx);
812                 rte_bitmap_clear(t->ftid_bmap, fidx + 1);
813                 rte_bitmap_clear(t->ftid_bmap, fidx + 2);
814                 rte_bitmap_clear(t->ftid_bmap, fidx + 3);
815         }
816         t4_os_unlock(&t->ftid_lock);
817 }
818
819 /**
820  * Check a delete filter request for validity and send it to the hardware.
821  * Return 0 on success, an error number otherwise.  We attach any provided
822  * filter operation context to the internal filter specification in order to
823  * facilitate signaling completion of the operation.
824  */
825 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
826                      struct ch_filter_specification *fs,
827                      struct filter_ctx *ctx)
828 {
829         struct port_info *pi = (struct port_info *)(dev->data->dev_private);
830         struct adapter *adapter = pi->adapter;
831         struct filter_entry *f;
832         unsigned int chip_ver;
833         int ret;
834
835         if (is_hashfilter(adapter) && fs->cap)
836                 return cxgbe_del_hash_filter(dev, filter_id, ctx);
837
838         if (filter_id >= adapter->tids.nftids)
839                 return -ERANGE;
840
841         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
842
843         ret = is_filter_set(&adapter->tids, filter_id, fs->type);
844         if (!ret) {
845                 dev_warn(adap, "%s: could not find filter entry: %u\n",
846                          __func__, filter_id);
847                 return -EINVAL;
848         }
849
850         /*
851          * Ensure filter id is aligned on the 2 slot boundary for T6,
852          * and 4 slot boundary for cards below T6.
853          */
854         if (fs->type) {
855                 if (chip_ver < CHELSIO_T6)
856                         filter_id &= ~(0x3);
857                 else
858                         filter_id &= ~(0x1);
859         }
860
861         f = &adapter->tids.ftid_tab[filter_id];
862         ret = writable_filter(f);
863         if (ret)
864                 return ret;
865
866         if (f->valid) {
867                 f->ctx = ctx;
868                 cxgbe_clear_ftid(&adapter->tids,
869                                  f->tid - adapter->tids.ftid_base,
870                                  f->fs.type ? FILTER_TYPE_IPV6 :
871                                               FILTER_TYPE_IPV4);
872                 return del_filter_wr(dev, filter_id);
873         }
874
875         /*
876          * If the caller has passed in a Completion Context then we need to
877          * mark it as a successful completion so they don't stall waiting
878          * for it.
879          */
880         if (ctx) {
881                 ctx->result = 0;
882                 t4_complete(&ctx->completion);
883         }
884
885         return 0;
886 }
887
888 /**
889  * Check a Chelsio Filter Request for validity, convert it into our internal
890  * format and send it to the hardware.  Return 0 on success, an error number
891  * otherwise.  We attach any provided filter operation context to the internal
892  * filter specification in order to facilitate signaling completion of the
893  * operation.
894  */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        unsigned int fidx, iq, fid_bit = 0;
        struct filter_entry *f;
        unsigned int chip_ver;
        uint8_t bitoff[16] = {0};  /* all-zero pattern to detect an unset IPv6 address */
        int ret;

        /* Hash (maskless) filters are installed via their own path. */
        if (is_hashfilter(adapter) && fs->cap)
                return cxgbe_set_hash_filter(dev, fs, ctx);

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

        ret = validate_filter(adapter, fs);
        if (ret)
                return ret;

        /*
         * Ensure filter id is aligned on the 4 slot boundary for IPv6
         * maskfull filters.
         */
        if (fs->type)
                filter_id &= ~(0x3);

        ret = is_filter_set(&adapter->tids, filter_id, fs->type);
        if (ret)
                return -EBUSY;

        /* Resolve which ingress queue matched packets should be steered to. */
        iq = get_filter_steerq(dev, fs);

        /*
         * IPv6 filters occupy four slots and must be aligned on four-slot
         * boundaries for T5. On T6, IPv6 filters occupy two-slots and
         * must be aligned on two-slot boundaries.
         *
         * IPv4 filters only occupy a single slot and have no alignment
         * requirements but writing a new IPv4 filter into the middle
         * of an existing IPv6 filter requires clearing the old IPv6
         * filter.
         */
        if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
                /*
                 * For T6, If our IPv4 filter isn't being written to a
                 * multiple of two filter index and there's an IPv6
                 * filter at the multiple of 2 base slot, then we need
                 * to delete that IPv6 filter ...
                 * For adapters below T6, IPv6 filter occupies 4 entries.
                 */
                if (chip_ver < CHELSIO_T6)
                        fidx = filter_id & ~0x3;
                else
                        fidx = filter_id & ~0x1;

                /* Refuse if an active IPv6 filter owns our slot group. */
                if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
                        f = &adapter->tids.ftid_tab[fidx];
                        if (f->valid)
                                return -EBUSY;
                }
        } else { /* IPv6 */
                unsigned int max_filter_id;

                if (chip_ver < CHELSIO_T6) {
                        /*
                         * Ensure that the IPv6 filter is aligned on a
                         * multiple of 4 boundary.
                         */
                        if (filter_id & 0x3)
                                return -EINVAL;

                        max_filter_id = filter_id + 4;
                } else {
                        /*
                         * For T6, CLIP being enabled, IPv6 filter would occupy
                         * 2 entries.
                         */
                        if (filter_id & 0x1)
                                return -EINVAL;

                        max_filter_id = filter_id + 2;
                }

                /*
                 * Check all except the base overlapping IPv4 filter
                 * slots.
                 */
                for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
                        f = &adapter->tids.ftid_tab[fidx];
                        if (f->valid)
                                return -EBUSY;
                }
        }

        /*
         * Check to make sure that provided filter index is not
         * already in use by someone else
         */
        f = &adapter->tids.ftid_tab[filter_id];
        if (f->valid)
                return -EBUSY;

        /* fidx becomes the hardware tid; fid_bit indexes the ftid bitmap. */
        fidx = adapter->tids.ftid_base + filter_id;
        fid_bit = filter_id;
        ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
                             fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
        if (ret)
                return ret;

        /*
         * Check to make sure the filter requested is writable ...
         */
        ret = writable_filter(f);
        if (ret) {
                /* Clear the bits we have set above */
                cxgbe_clear_ftid(&adapter->tids, fid_bit,
                                 fs->type ? FILTER_TYPE_IPV6 :
                                            FILTER_TYPE_IPV4);
                return ret;
        }

        /*
         * Allocate a clip table entry only if we have non-zero IPv6 address
         */
        if (chip_ver > CHELSIO_T5 && fs->type &&
            memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
                f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
                if (!f->clipt)
                        goto free_tid;
        }

        /*
         * Convert the filter specification into our internal format.
         * We copy the PF/VF specification into the Outer VLAN field
         * here so the rest of the code -- including the interface to
         * the firmware -- doesn't have to constantly do these checks.
         */
        f->fs = *fs;
        f->fs.iq = iq;
        f->dev = dev;

        /*
         * Attempt to set the filter.  If we don't succeed, we clear
         * it and return the failure.
         */
        f->ctx = ctx;
        f->tid = fidx; /* Save the actual tid */
        ret = set_filter_wr(dev, filter_id);
        if (ret) {
                fid_bit = f->tid - adapter->tids.ftid_base;
                goto free_tid;
        }

        return ret;

free_tid:
        /* Roll back the bitmap reservation (and any CLIP entry) on failure. */
        cxgbe_clear_ftid(&adapter->tids, fid_bit,
                         fs->type ? FILTER_TYPE_IPV6 :
                                    FILTER_TYPE_IPV4);
        clear_filter(f);
        return ret;
}
1062
1063 /**
1064  * Handle a Hash filter write reply.
1065  */
void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
{
        struct tid_info *t = &adap->tids;
        struct filter_entry *f;
        struct filter_ctx *ctx = NULL;
        unsigned int tid = GET_TID(rpl);
        /* The atid we allocated when sending the request comes back in the reply. */
        unsigned int ftid = G_TID_TID(G_AOPEN_ATID
                                      (be32_to_cpu(rpl->atid_status)));
        unsigned int status  = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

        f = lookup_atid(t, ftid);
        if (!f) {
                dev_warn(adap, "%s: could not find filter entry: %d\n",
                         __func__, ftid);
                return;
        }

        /* Detach the completion context before we mutate/free the entry. */
        ctx = f->ctx;
        f->ctx = NULL;

        switch (status) {
        case CPL_ERR_NONE: {
                f->tid = tid;
                f->pending = 0;  /* asynchronous setup completed */
                f->valid = 1;

                /* The filter now lives in the tid table; release its atid. */
                cxgbe_insert_tid(t, f, f->tid, 0);
                cxgbe_free_atid(t, ftid);
                if (ctx) {
                        ctx->tid = f->tid;
                        ctx->result = 0;
                }
                /* Zero the TCB timestamp fields used as the hit counter. */
                if (f->fs.hitcnts)
                        set_tcb_field(adap, tid,
                                      W_TCB_TIMESTAMP,
                                      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
                                      V_TCB_T_RTT_TS_RECENT_AGE
                                              (M_TCB_T_RTT_TS_RECENT_AGE),
                                      V_TCB_TIMESTAMP(0ULL) |
                                      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
                                      1);
                /* Enable VLAN rewrite in the TCB when the filter edits VLANs. */
                if (f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE)
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
                break;
        }
        default:
                dev_warn(adap, "%s: filter creation failed with status = %u\n",
                         __func__, status);

                if (ctx) {
                        /* TCAM full is retryable; anything else is fatal. */
                        if (status == CPL_ERR_TCAM_FULL)
                                ctx->result = -EAGAIN;
                        else
                                ctx->result = -EINVAL;
                }

                /* Creation failed: drop the atid and the filter entry itself. */
                cxgbe_free_atid(t, ftid);
                t4_os_free(f);
        }

        if (ctx)
                t4_complete(&ctx->completion);
}
1130
1131 /**
1132  * Handle a LE-TCAM filter write/deletion reply.
1133  */
1134 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1135 {
1136         struct filter_entry *f = NULL;
1137         unsigned int tid = GET_TID(rpl);
1138         int idx, max_fidx = adap->tids.nftids;
1139
1140         /* Get the corresponding filter entry for this tid */
1141         if (adap->tids.ftid_tab) {
1142                 /* Check this in normal filter region */
1143                 idx = tid - adap->tids.ftid_base;
1144                 if (idx >= max_fidx)
1145                         return;
1146
1147                 f = &adap->tids.ftid_tab[idx];
1148                 if (f->tid != tid)
1149                         return;
1150         }
1151
1152         /* We found the filter entry for this tid */
1153         if (f) {
1154                 unsigned int ret = G_COOKIE(rpl->cookie);
1155                 struct filter_ctx *ctx;
1156
1157                 /*
1158                  * Pull off any filter operation context attached to the
1159                  * filter.
1160                  */
1161                 ctx = f->ctx;
1162                 f->ctx = NULL;
1163
1164                 if (ret == FW_FILTER_WR_FLT_ADDED) {
1165                         f->pending = 0;  /* asynchronous setup completed */
1166                         f->valid = 1;
1167                         if (ctx) {
1168                                 ctx->tid = f->tid;
1169                                 ctx->result = 0;
1170                         }
1171                 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
1172                         /*
1173                          * Clear the filter when we get confirmation from the
1174                          * hardware that the filter has been deleted.
1175                          */
1176                         clear_filter(f);
1177                         if (ctx)
1178                                 ctx->result = 0;
1179                 } else {
1180                         /*
1181                          * Something went wrong.  Issue a warning about the
1182                          * problem and clear everything out.
1183                          */
1184                         dev_warn(adap, "filter %u setup failed with error %u\n",
1185                                  idx, ret);
1186                         clear_filter(f);
1187                         if (ctx)
1188                                 ctx->result = -EINVAL;
1189                 }
1190
1191                 if (ctx)
1192                         t4_complete(&ctx->completion);
1193         }
1194 }
1195
1196 /*
1197  * Retrieve the packet count for the specified filter.
1198  */
1199 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
1200                            u64 *c, int hash, bool get_byte)
1201 {
1202         struct filter_entry *f;
1203         unsigned int tcb_base, tcbaddr;
1204         int ret;
1205
1206         tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
1207         if (is_hashfilter(adapter) && hash) {
1208                 if (fidx < adapter->tids.ntids) {
1209                         f = adapter->tids.tid_tab[fidx];
1210                         if (!f)
1211                                 return -EINVAL;
1212
1213                         if (is_t5(adapter->params.chip)) {
1214                                 *c = 0;
1215                                 return 0;
1216                         }
1217                         tcbaddr = tcb_base + (fidx * TCB_SIZE);
1218                         goto get_count;
1219                 } else {
1220                         return -ERANGE;
1221                 }
1222         } else {
1223                 if (fidx >= adapter->tids.nftids)
1224                         return -ERANGE;
1225
1226                 f = &adapter->tids.ftid_tab[fidx];
1227                 if (!f->valid)
1228                         return -EINVAL;
1229
1230                 tcbaddr = tcb_base + f->tid * TCB_SIZE;
1231         }
1232
1233         f = &adapter->tids.ftid_tab[fidx];
1234         if (!f->valid)
1235                 return -EINVAL;
1236
1237 get_count:
1238         if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
1239                 /*
1240                  * For T5, the Filter Packet Hit Count is maintained as a
1241                  * 32-bit Big Endian value in the TCB field {timestamp}.
1242                  * Similar to the craziness above, instead of the filter hit
1243                  * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
1244                  * sizeof(u32)), it actually shows up at offset 24.  Whacky.
1245                  */
1246                 if (get_byte) {
1247                         unsigned int word_offset = 4;
1248                         __be64 be64_byte_count;
1249
1250                         t4_os_lock(&adapter->win0_lock);
1251                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1252                                            tcbaddr +
1253                                            (word_offset * sizeof(__be32)),
1254                                            sizeof(be64_byte_count),
1255                                            &be64_byte_count,
1256                                            T4_MEMORY_READ);
1257                         t4_os_unlock(&adapter->win0_lock);
1258                         if (ret < 0)
1259                                 return ret;
1260                         *c = be64_to_cpu(be64_byte_count);
1261                 } else {
1262                         unsigned int word_offset = 6;
1263                         __be32 be32_count;
1264
1265                         t4_os_lock(&adapter->win0_lock);
1266                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1267                                            tcbaddr +
1268                                            (word_offset * sizeof(__be32)),
1269                                            sizeof(be32_count), &be32_count,
1270                                            T4_MEMORY_READ);
1271                         t4_os_unlock(&adapter->win0_lock);
1272                         if (ret < 0)
1273                                 return ret;
1274                         *c = (u64)be32_to_cpu(be32_count);
1275                 }
1276         }
1277         return 0;
1278 }
1279
1280 /**
1281  * Handle a Hash filter delete reply.
1282  */
1283 void hash_del_filter_rpl(struct adapter *adap,
1284                          const struct cpl_abort_rpl_rss *rpl)
1285 {
1286         struct tid_info *t = &adap->tids;
1287         struct filter_entry *f;
1288         struct filter_ctx *ctx = NULL;
1289         unsigned int tid = GET_TID(rpl);
1290
1291         f = lookup_tid(t, tid);
1292         if (!f) {
1293                 dev_warn(adap, "%s: could not find filter entry: %u\n",
1294                          __func__, tid);
1295                 return;
1296         }
1297
1298         ctx = f->ctx;
1299         f->ctx = NULL;
1300
1301         f->valid = 0;
1302
1303         if (f->clipt)
1304                 cxgbe_clip_release(f->dev, f->clipt);
1305
1306         cxgbe_remove_tid(t, 0, tid, 0);
1307         t4_os_free(f);
1308
1309         if (ctx) {
1310                 ctx->result = 0;
1311                 t4_complete(&ctx->completion);
1312         }
1313 }