net/cxgbe: fix slot allocation for IPv6 flows
drivers/net/cxgbe/cxgbe_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include <rte_net.h>

#include "base/common.h"
#include "base/t4_tcb.h"
#include "base/t4_regs.h"
#include "cxgbe_filter.h"
#include "clip_tbl.h"
#include "l2t.h"

/**
 * Initialize Hash Filters
 */
int cxgbe_init_hash_filter(struct adapter *adap)
{
        unsigned int n_user_filters;
        unsigned int user_filter_perc;
        int ret;
        u32 params[7], val[7];

#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
        V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
        V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) |  \
        V_FW_PARAMS_PARAM_Y(0) | \
        V_FW_PARAMS_PARAM_Z(0))

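        /* Ask the firmware how many TIDs the adapter supports. */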
        params[0] = FW_PARAM_DEV(NTID);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
                              params, val);
        if (ret < 0)
                return ret;
        adap->tids.ntids = val[0];
        adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

        user_filter_perc = 100;
        n_user_filters = mult_frac(adap->tids.nftids,
                                   user_filter_perc,
                                   100);

        adap->tids.nftids = n_user_filters;
        adap->params.hash_filter = 1;
        return 0;
}

/**
 * Validate if the requested filter specification can be set by checking
 * if the requested features have been enabled
 */
int cxgbe_validate_filter(struct adapter *adapter,
                          struct ch_filter_specification *fs)
{
        u32 fconf;

        /*
         * Check for unconfigured fields being used.
         */
        fconf = adapter->params.tp.vlan_pri_map;

#define S(_field) \
        (fs->val._field || fs->mask._field)
#define U(_mask, _field) \
        (!(fconf & (_mask)) && S(_field))

        if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
            U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx))
                return -EOPNOTSUPP;

#undef S
#undef U

        /*
         * If the user is requesting that the filter action loop
         * matching packets back out one of our ports, make sure that
         * the egress port is in range.
         */
        if (fs->action == FILTER_SWITCH &&
            fs->eport >= adapter->params.nports)
                return -ERANGE;

        /*
         * Don't allow various trivially obvious bogus out-of-range
         * values ...
         */
        if (fs->val.iport >= adapter->params.nports)
                return -ERANGE;

        if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
                return -EOPNOTSUPP;

        if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
                return -EOPNOTSUPP;

        return 0;
}

/**
 * Get the queue to which the traffic must be steered.
 */
static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
                                      struct ch_filter_specification *fs)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        unsigned int iq;

        /*
         * If the user has requested steering matching Ingress Packets
         * to a specific Queue Set, we need to make sure it's in range
         * for the port and map that into the Absolute Queue ID of the
         * Queue Set's Response Queue.
         */
        if (!fs->dirsteer) {
                iq = 0;
        } else {
                /*
                 * If the iq id is greater than the number of qsets,
                 * then assume it is an absolute qid.
                 */
                if (fs->iq < pi->n_rx_qsets)
                        iq = adapter->sge.ethrxq[pi->first_qset +
                                                 fs->iq].rspq.abs_id;
                else
                        iq = fs->iq;
        }

        return iq;
}

/* Return an error number if the indicated filter isn't writable ... */
static int writable_filter(struct filter_entry *f)
{
        if (f->locked)
                return -EPERM;
        if (f->pending)
                return -EBUSY;

        return 0;
}

/**
 * Send CPL_SET_TCB_FIELD message
 */
static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
                          u16 word, u64 mask, u64 val, int no_reply)
{
        struct rte_mbuf *mbuf;
        struct cpl_set_tcb_field *req;
        struct sge_ctrl_txq *ctrlq;

        ctrlq = &adapter->sge.ctrlq[0];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        WARN_ON(!mbuf);
        if (!mbuf)
                return;

        mbuf->data_len = sizeof(*req);
        mbuf->pkt_len = mbuf->data_len;

        req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
        memset(req, 0, sizeof(*req));
        INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
        req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
                                      V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
                                      V_NO_REPLY(no_reply));
        req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);

        t4_mgmt_tx(ctrlq, mbuf);
}

/**
 * Set one of the t_flags bits in the TCB.
 */
static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
                          unsigned int bit_pos, unsigned int val, int no_reply)
{
        set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
                      (unsigned long long)val << bit_pos, no_reply);
}

/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
                                        struct cpl_set_tcb_field *req,
                                        unsigned int word,
                                        u64 mask, u64 val, u8 cookie,
                                        int no_reply)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
        req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
                                      V_QUEUENO(0));
        req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);
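        /*
         * Trailing NOOP sub-command: padding so the immediate payload
         * stays aligned for the ULP TX engine (an assumption based on
         * the usual ULP_TX framing; not spelled out here).
         */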
        sc = (struct ulptx_idata *)(req + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
 * IPv4 requires only 1 slot on all cards.
 */
u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
{
        if (family == FILTER_TYPE_IPV6) {
                if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
                        return 4;

                return 2;
        }

        return 1;
}
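
/*
 * Illustrative usage (not from this file): reserving room for an IPv6
 * filter on a pre-T6 card, which needs 4 contiguous LE-TCAM slots:
 *
 *      u8 nentries = cxgbe_filter_slots(adap, FILTER_TYPE_IPV6);
 *      int pos = cxgbe_alloc_ftid(adap, nentries);
 *      if (pos < 0)
 *              ... no contiguous free region available ...
 */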

/**
 * Check if entries are already filled.
 */
bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
{
        bool result = FALSE;
        u32 i;

        /* Ensure there are enough slots available. */
        t4_os_lock(&t->ftid_lock);
        for (i = fidx; i < fidx + nentries; i++) {
                if (rte_bitmap_get(t->ftid_bmap, i)) {
                        result = TRUE;
                        break;
                }
        }
        t4_os_unlock(&t->ftid_lock);
        return result;
}

/**
 * Allocate the requested number of contiguous free filter entries.
 */
int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
{
        struct tid_info *t = &adap->tids;
        int pos;
        int size = t->nftids;

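        /*
         * Multi-slot requests (IPv6 filters) need a contiguous run of
         * free bits; cxgbe_bitmap_find_free_region() is assumed to hand
         * back the first index of such a region, or a value >= size on
         * failure, which is what the return check below relies on.
         */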
        t4_os_lock(&t->ftid_lock);
        if (nentries > 1)
                pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
                                                    nentries);
        else
                pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
        t4_os_unlock(&t->ftid_lock);

        return pos < size ? pos : -1;
}

/**
 * Construct hash filter ntuple.
 */
static u64 hash_filter_ntuple(const struct filter_entry *f)
{
        struct adapter *adap = ethdev2adap(f->dev);
        struct tp_params *tp = &adap->params.tp;
        u64 ntuple = 0;
        u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */

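        /*
         * Each compressed-filter field lands at the bit offset the TP
         * engine advertised for it (a negative shift means the field is
         * not part of the current TP_VLAN_PRI_MAP configuration).  When
         * no protocol is given, TCP is assumed as the default.
         */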
        if (tp->port_shift >= 0 && f->fs.mask.iport)
                ntuple |= (u64)f->fs.val.iport << tp->port_shift;

        if (tp->protocol_shift >= 0) {
                if (!f->fs.val.proto)
                        ntuple |= (u64)tcp_proto << tp->protocol_shift;
                else
                        ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
        }

        if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
                ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
        if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
                ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;

        return ntuple;
}

/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
                             unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*abort_req) -
                              sizeof(struct work_request_hdr));
        OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
        abort_req->rsvd0 = cpu_to_be32(0);
        abort_req->rsvd1 = 0;
        abort_req->cmd = CPL_ABORT_NO_RST;
        sc = (struct ulptx_idata *)(abort_req + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
                             unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*abort_rpl) -
                              sizeof(struct work_request_hdr));
        OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
        abort_rpl->rsvd0 = cpu_to_be32(0);
        abort_rpl->rsvd1 = 0;
        abort_rpl->cmd = CPL_ABORT_NO_RST;
        sc = (struct ulptx_idata *)(abort_rpl + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
                                 unsigned int filter_id,
                                 struct filter_ctx *ctx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;
        int ret;

        if (filter_id > adapter->tids.ntids)
                return -E2BIG;

        f = lookup_tid(t, filter_id);
        if (!f) {
                dev_err(adapter, "%s: no filter entry for filter_id = %u\n",
                        __func__, filter_id);
                return -EINVAL;
        }

        ret = writable_filter(f);
        if (ret)
                return ret;

        if (f->valid) {
                unsigned int wrlen;
                struct rte_mbuf *mbuf;
                struct work_request_hdr *wr;
                struct ulptx_idata *aligner;
                struct cpl_set_tcb_field *req;
                struct cpl_abort_req *abort_req;
                struct cpl_abort_rpl *abort_rpl;

                f->ctx = ctx;
                f->pending = 1;

                wrlen = cxgbe_roundup(sizeof(*wr) +
                                      (sizeof(*req) + sizeof(*aligner)) +
                                      sizeof(*abort_req) + sizeof(*abort_rpl),
                                      16);

                ctrlq = &adapter->sge.ctrlq[port_id];
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        dev_err(adapter, "%s: could not allocate mbuf ..\n",
                                __func__);
                        goto out_err;
                }

                mbuf->data_len = wrlen;
                mbuf->pkt_len = mbuf->data_len;

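                /*
                 * One work request, three CPLs back to back: a
                 * SET_TCB_FIELD that repoints the connection's RSS queue
                 * at the firmware event queue, then an ABORT_REQ/
                 * ABORT_RPL pair that tears the hash-filter "connection"
                 * down.
                 */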
                req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
                INIT_ULPTX_WR(req, wrlen, 0, 0);
                wr = (struct work_request_hdr *)req;
                wr++;
                req = (struct cpl_set_tcb_field *)wr;
                mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
                                V_TCB_RSS_INFO(M_TCB_RSS_INFO),
                                V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
                                0, 1);
                aligner = (struct ulptx_idata *)(req + 1);
                abort_req = (struct cpl_abort_req *)(aligner + 1);
                mk_abort_req_ulp(abort_req, f->tid);
                abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
                mk_abort_rpl_ulp(abort_rpl, f->tid);
                t4_mgmt_tx(ctrlq, mbuf);
        }
        return 0;

out_err:
        return -ENOMEM;
}

/**
 * Build an ACT_OPEN_REQ6 message for setting IPv6 hash filter.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
                             unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req6 *req = NULL;
        u64 local_lo, local_hi, peer_lo, peer_hi;
        u32 *lip = (u32 *)f->fs.val.lip;
        u32 *fip = (u32 *)f->fs.val.fip;

        switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
        case CHELSIO_T6:
                req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

                INIT_TP_WR(req, 0);
                break;
        default:
                dev_err(adap, "%s: unsupported chip type!\n", __func__);
                return;
        }

        local_hi = ((u64)lip[1]) << 32 | lip[0];
        local_lo = ((u64)lip[3]) << 32 | lip[2];
        peer_hi = ((u64)fip[1]) << 32 | fip[0];
        peer_lo = ((u64)fip[3]) << 32 | fip[2];

        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                    qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        req->local_ip_hi = local_hi;
        req->local_ip_lo = local_lo;
        req->peer_ip_hi = peer_hi;
        req->peer_ip_lo = peer_lo;
        req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                V_DELACK(f->fs.hitcnts) |
                                V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
                                V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
        req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
                            V_RSS_QUEUE(f->fs.iq) |
                            F_T5_OPT_2_VALID |
                            F_RX_CHANNEL |
                            V_SACK_EN(f->fs.swapmac) |
                            V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
                                         (f->fs.dirsteer << 1)) |
                            V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Build an ACT_OPEN_REQ message for setting IPv4 hash filter.
 */
static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
                            unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req *req = NULL;

        switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
        case CHELSIO_T6:
                req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);

                INIT_TP_WR(req, 0);
                break;
        default:
                dev_err(adap, "%s: unsupported chip type!\n", __func__);
                return;
        }

        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                                    qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
                        f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
        req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
                        f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
        req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                V_DELACK(f->fs.hitcnts) |
                                V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
                                V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
        req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
                            V_RSS_QUEUE(f->fs.iq) |
                            F_T5_OPT_2_VALID |
                            F_RX_CHANNEL |
                            V_SACK_EN(f->fs.swapmac) |
                            V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
                                         (f->fs.dirsteer << 1)) |
                            V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Set the specified hash filter.
 */
static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
                                 struct ch_filter_specification *fs,
                                 struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct rte_mbuf *mbuf;
        struct sge_ctrl_txq *ctrlq;
        unsigned int iq;
        int atid, size;
        int ret = 0;

        ret = cxgbe_validate_filter(adapter, fs);
        if (ret)
                return ret;

        iq = get_filter_steerq(dev, fs);

        ctrlq = &adapter->sge.ctrlq[pi->port_id];

        f = t4_os_alloc(sizeof(*f));
        if (!f) {
                ret = -ENOMEM;
                goto out_err;
        }

        f->fs = *fs;
        f->ctx = ctx;
        f->dev = dev;
        f->fs.iq = iq;

        /*
         * If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newvlan == VLAN_INSERT ||
            f->fs.newvlan == VLAN_REWRITE) {
                /* allocate L2T entry for new filter */
                f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
                                                   f->fs.eport, f->fs.dmac);
                if (!f->l2t) {
                        ret = -ENOMEM;
                        goto out_err;
                }
        }

        atid = cxgbe_alloc_atid(t, f);
        if (atid < 0) {
                ret = -ENOMEM;
                goto out_err;
        }

        if (f->fs.type == FILTER_TYPE_IPV6) {
                /* IPv6 hash filter */
                f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
                if (!f->clipt) {
                        ret = -ENOMEM;
                        goto free_atid;
                }

                size = sizeof(struct cpl_t6_act_open_req6);
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        ret = -ENOMEM;
                        goto free_clip;
                }

                mbuf->data_len = size;
                mbuf->pkt_len = mbuf->data_len;

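                /*
                 * The upper bits of the TID field carry the firmware
                 * event queue's absolute id so the ACT_OPEN_RPL is
                 * steered there; the low 14 bits carry the atid
                 * (assumption: this matches the usual cxgb4 encoding).
                 */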
                mk_act_open_req6(f, mbuf,
                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                 adapter);
        } else {
                /* IPv4 hash filter */
                size = sizeof(struct cpl_t6_act_open_req);
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        ret = -ENOMEM;
                        goto free_atid;
                }

                mbuf->data_len = size;
                mbuf->pkt_len = mbuf->data_len;

                mk_act_open_req(f, mbuf,
                                ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                adapter);
        }

        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;

free_clip:
        cxgbe_clip_release(f->dev, f->clipt);
free_atid:
        cxgbe_free_atid(t, atid);

out_err:
        t4_os_free(f);
        return ret;
}

/**
 * Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct filter_entry *f)
{
        if (f->clipt)
                cxgbe_clip_release(f->dev, f->clipt);

        /*
         * The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @adap: adapter context
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
                            struct fw_filter2_wr *wr, int qid)
{
        memset(wr, 0, sizeof(*wr));
        if (adap->params.filter2_wr_support)
                wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
        wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
                                    V_FW_FILTER_WR_NOREPLY(qid < 0));
        wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
        if (qid >= 0)
                wr->rx_chan_rx_rpl_iq =
                                cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/**
 * Create FW work request to delete the filter at a specified index
 */
static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter2_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;

        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf)
                return -ENOMEM;

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
        t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;
}

static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter2_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;
        int ret;

        /*
         * If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newvlan) {
                /* allocate L2T entry for new filter */
                f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
                                                   f->fs.eport, f->fs.dmac);
                if (!f->l2t)
                        return -ENOMEM;
        }

        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf) {
                ret = -ENOMEM;
                goto out;
        }

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
        memset(fwr, 0, sizeof(*fwr));

        /*
         * Construct the work request to set the filter.
         */
        if (adapter->params.filter2_wr_support)
                fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
        fwr->tid_to_iq =
                cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
                            V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                            V_FW_FILTER_WR_NOREPLY(0) |
                            V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                            V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                            V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                            V_FW_FILTER_WR_INSVLAN
                                (f->fs.newvlan == VLAN_INSERT ||
                                 f->fs.newvlan == VLAN_REWRITE) |
                            V_FW_FILTER_WR_RMVLAN
                                (f->fs.newvlan == VLAN_REMOVE ||
                                 f->fs.newvlan == VLAN_REWRITE) |
                            V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                            V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                            V_FW_FILTER_WR_PRIO(f->fs.prio) |
                            V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
        fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
                            V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
                                                     ));
        fwr->maci_to_matchtypem =
                cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                            V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                            V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                            V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = cpu_to_be16(f->fs.val.lport);
        fwr->lpm = cpu_to_be16(f->fs.mask.lport);
        fwr->fp = cpu_to_be16(f->fs.val.fport);
        fwr->fpm = cpu_to_be16(f->fs.mask.fport);

        if (adapter->params.filter2_wr_support) {
                fwr->filter_type_swapmac =
                         V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
                fwr->natmode_to_ulp_type =
                        V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
                                                 ULP_MODE_TCPDDP :
                                                 ULP_MODE_NONE) |
                        V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
                fwr->newlport = cpu_to_be16(f->fs.nat_lport);
                fwr->newfport = cpu_to_be16(f->fs.nat_fport);
        }

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;

out:
        return ret;
}

/**
 * Set the corresponding entries in the bitmap.
 */
static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
        u32 i;

        t4_os_lock(&t->ftid_lock);
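        /*
         * Only the first slot is tested here; callers are expected to
         * have already verified the whole [fidx, fidx + nentries) range
         * via cxgbe_is_filter_set().
         */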
        if (rte_bitmap_get(t->ftid_bmap, fidx)) {
                t4_os_unlock(&t->ftid_lock);
                return -EBUSY;
        }

        for (i = fidx; i < fidx + nentries; i++)
                rte_bitmap_set(t->ftid_bmap, i);
        t4_os_unlock(&t->ftid_lock);
        return 0;
}

/**
 * Clear the corresponding entries in the bitmap.
 */
static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
        u32 i;

        t4_os_lock(&t->ftid_lock);
        for (i = fidx; i < fidx + nentries; i++)
                rte_bitmap_clear(t->ftid_bmap, i);
        t4_os_unlock(&t->ftid_lock);
}

/**
 * Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise.  We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct filter_entry *f;
        unsigned int chip_ver;
        u8 nentries;
        int ret;

        if (is_hashfilter(adapter) && fs->cap)
                return cxgbe_del_hash_filter(dev, filter_id, ctx);

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

        /*
         * Ensure the IPv6 filter id is aligned on a 2-slot boundary for T6,
         * and a 4-slot boundary for cards below T6.
         */
        if (fs->type == FILTER_TYPE_IPV6) {
                if (chip_ver < CHELSIO_T6)
                        filter_id &= ~(0x3);
                else
                        filter_id &= ~(0x1);
        }

        nentries = cxgbe_filter_slots(adapter, fs->type);
        ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
        if (!ret) {
                dev_warn(adapter, "%s: could not find filter entry: %u\n",
                         __func__, filter_id);
                return -EINVAL;
        }

        f = &adapter->tids.ftid_tab[filter_id];
        ret = writable_filter(f);
        if (ret)
                return ret;

        if (f->valid) {
                f->ctx = ctx;
                cxgbe_clear_ftid(&adapter->tids,
                                 f->tid - adapter->tids.ftid_base,
                                 nentries);
                return del_filter_wr(dev, filter_id);
        }

        /*
         * If the caller has passed in a Completion Context then we need to
         * mark it as a successful completion so they don't stall waiting
         * for it.
         */
        if (ctx) {
                ctx->result = 0;
                t4_complete(&ctx->completion);
        }

        return 0;
}

/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        unsigned int fidx, iq;
        struct filter_entry *f;
        unsigned int chip_ver;
        u8 nentries, bitoff[16] = {0};
        int ret;

        if (is_hashfilter(adapter) && fs->cap)
                return cxgbe_set_hash_filter(dev, fs, ctx);

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

        ret = cxgbe_validate_filter(adapter, fs);
        if (ret)
                return ret;

        /*
         * IPv6 filters occupy four slots and must be aligned on four-slot
         * boundaries on cards below T6.  On T6, IPv6 filters occupy two
         * slots and must be aligned on two-slot boundaries.
         *
         * IPv4 filters only occupy a single slot and have no alignment
         * requirements.
         */
        fidx = filter_id;
        if (fs->type == FILTER_TYPE_IPV6) {
                if (chip_ver < CHELSIO_T6)
                        fidx &= ~(0x3);
                else
                        fidx &= ~(0x1);
        }

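        /*
         * Example: with the 4-slot requirement, filter_id 5 yields
         * fidx = 5 & ~0x3 = 4 != 5, so only indices 0, 4, 8, ... are
         * accepted for IPv6 filters on pre-T6 cards (even indices on T6).
         */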
        if (fidx != filter_id)
                return -EINVAL;

        nentries = cxgbe_filter_slots(adapter, fs->type);
        ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
        if (ret)
                return -EBUSY;

        iq = get_filter_steerq(dev, fs);

        /*
         * Check to make sure that the provided filter index is not
         * already in use by someone else.
         */
        f = &adapter->tids.ftid_tab[filter_id];
        if (f->valid)
                return -EBUSY;

        fidx = adapter->tids.ftid_base + filter_id;
        ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
        if (ret)
                return ret;

        /*
         * Check to make sure the filter requested is writable ...
         */
        ret = writable_filter(f);
        if (ret) {
                /* Clear the bits we have set above */
                cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
                return ret;
        }

        /*
         * Allocate a clip table entry only if we have a non-zero IPv6 address.
         */
        if (chip_ver > CHELSIO_T5 && fs->type &&
            memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
                f->clipt = cxgbe_clip_alloc(dev, (u32 *)&fs->val.lip);
                if (!f->clipt) {
                        ret = -ENOMEM;
                        goto free_tid;
                }
        }

        /*
         * Convert the filter specification into our internal format.
         */
        f->fs = *fs;
        f->fs.iq = iq;
        f->dev = dev;

        /*
         * Attempt to set the filter.  If we don't succeed, we clear
         * it and return the failure.
         */
        f->ctx = ctx;
        f->tid = fidx; /* Save the actual tid */
        ret = set_filter_wr(dev, filter_id);
        if (ret)
                goto free_tid;

        return ret;

free_tid:
        cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
        clear_filter(f);
        return ret;
}

/**
 * Handle a Hash filter write reply.
 */
void cxgbe_hash_filter_rpl(struct adapter *adap,
                           const struct cpl_act_open_rpl *rpl)
{
        struct tid_info *t = &adap->tids;
        struct filter_entry *f;
        struct filter_ctx *ctx = NULL;
        unsigned int tid = GET_TID(rpl);
        unsigned int ftid = G_TID_TID(G_AOPEN_ATID
                                      (be32_to_cpu(rpl->atid_status)));
        unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

        f = lookup_atid(t, ftid);
        if (!f) {
                dev_warn(adap, "%s: could not find filter entry: %u\n",
                         __func__, ftid);
                return;
        }

        ctx = f->ctx;
        f->ctx = NULL;

        switch (status) {
        case CPL_ERR_NONE: {
                f->tid = tid;
                f->pending = 0;  /* asynchronous setup completed */
                f->valid = 1;

                cxgbe_insert_tid(t, f, f->tid, 0);
                cxgbe_free_atid(t, ftid);
                if (ctx) {
                        ctx->tid = f->tid;
                        ctx->result = 0;
                }
                if (f->fs.hitcnts)
                        set_tcb_field(adap, tid,
                                      W_TCB_TIMESTAMP,
                                      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
                                      V_TCB_T_RTT_TS_RECENT_AGE
                                              (M_TCB_T_RTT_TS_RECENT_AGE),
                                      V_TCB_TIMESTAMP(0ULL) |
                                      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
                                      1);
                if (f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE)
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
                break;
        }
        default:
                dev_warn(adap, "%s: filter creation failed with status = %u\n",
                         __func__, status);

                if (ctx) {
                        if (status == CPL_ERR_TCAM_FULL)
                                ctx->result = -EAGAIN;
                        else
                                ctx->result = -EINVAL;
                }

                cxgbe_free_atid(t, ftid);
                t4_os_free(f);
        }

        if (ctx)
                t4_complete(&ctx->completion);
}

/**
 * Handle a LE-TCAM filter write/deletion reply.
 */
void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        struct filter_entry *f = NULL;
        unsigned int tid = GET_TID(rpl);
        int idx, max_fidx = adap->tids.nftids;

        /* Get the corresponding filter entry for this tid */
        if (adap->tids.ftid_tab) {
                /* Check this in normal filter region */
                idx = tid - adap->tids.ftid_base;
                if (idx < 0 || idx >= max_fidx)
                        return;

                f = &adap->tids.ftid_tab[idx];
                if (f->tid != tid)
                        return;
        }

        /* We found the filter entry for this tid */
        if (f) {
                unsigned int ret = G_COOKIE(rpl->cookie);
                struct filter_ctx *ctx;

                /*
                 * Pull off any filter operation context attached to the
                 * filter.
                 */
                ctx = f->ctx;
                f->ctx = NULL;

                if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                        if (ctx) {
                                ctx->tid = f->tid;
                                ctx->result = 0;
                        }
                } else if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /*
                         * Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(f);
                        if (ctx)
                                ctx->result = 0;
                } else {
                        /*
                         * Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_warn(adap, "filter %u setup failed with error %u\n",
                                 idx, ret);
                        clear_filter(f);
                        if (ctx)
                                ctx->result = -EINVAL;
                }

                if (ctx)
                        t4_complete(&ctx->completion);
        }
}

/*
 * Retrieve the packet count for the specified filter.
 */
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
                           u64 *c, int hash, bool get_byte)
{
        struct filter_entry *f;
        unsigned int tcb_base, tcbaddr;
        int ret;

        tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
        if (is_hashfilter(adapter) && hash) {
                if (fidx < adapter->tids.ntids) {
                        f = adapter->tids.tid_tab[fidx];
                        if (!f)
                                return -EINVAL;

                        if (is_t5(adapter->params.chip)) {
                                *c = 0;
                                return 0;
                        }
                        tcbaddr = tcb_base + (fidx * TCB_SIZE);
                        goto get_count;
                } else {
                        return -ERANGE;
                }
        } else {
                if (fidx >= adapter->tids.nftids)
                        return -ERANGE;

                f = &adapter->tids.ftid_tab[fidx];
                if (!f->valid)
                        return -EINVAL;

                tcbaddr = tcb_base + f->tid * TCB_SIZE;
        }

get_count:
        if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
                /*
                 * For T5, the Filter Packet Hit Count is maintained as a
                 * 32-bit Big Endian value in the TCB field {timestamp}.
                 * Instead of the filter hit count showing up at offset 20
                 * ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually
                 * shows up at offset 24.  Whacky.
                 */
                if (get_byte) {
                        unsigned int word_offset = 4;
                        __be64 be64_byte_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be64_byte_count),
                                           &be64_byte_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = be64_to_cpu(be64_byte_count);
                } else {
                        unsigned int word_offset = 6;
                        __be32 be32_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be32_count), &be32_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = (u64)be32_to_cpu(be32_count);
                }
        }
        return 0;
}

/*
 * Clear the packet count for the specified filter.
 */
int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
                             int hash, bool clear_byte)
{
        u64 tcb_mask = 0, tcb_val = 0;
        struct filter_entry *f = NULL;
        u16 tcb_word = 0;

        if (is_hashfilter(adapter) && hash) {
                if (fidx >= adapter->tids.ntids)
                        return -ERANGE;

                /* No hitcounts supported for T5 hashfilters */
                if (is_t5(adapter->params.chip))
                        return 0;

                f = adapter->tids.tid_tab[fidx];
        } else {
                if (fidx >= adapter->tids.nftids)
                        return -ERANGE;

                f = &adapter->tids.ftid_tab[fidx];
        }

        if (!f || !f->valid)
                return -EINVAL;

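        /*
         * The hit counters live in reused TCB fields (see the layout
         * notes in cxgbe_get_filter_count()), so clearing them is just
         * a matter of zeroing those fields via CPL_SET_TCB_FIELD.
         */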
        tcb_word = W_TCB_TIMESTAMP;
        tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
        tcb_val = V_TCB_TIMESTAMP(0ULL);

        set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);

        if (clear_byte) {
                tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
                tcb_mask =
                        V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
                        V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
                tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
                          V_TCB_T_RTSEQ_RECENT(0ULL);

                set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
        }

        return 0;
}

/**
 * Handle a Hash filter delete reply.
 */
void cxgbe_hash_del_filter_rpl(struct adapter *adap,
                               const struct cpl_abort_rpl_rss *rpl)
{
        struct tid_info *t = &adap->tids;
        struct filter_entry *f;
        struct filter_ctx *ctx = NULL;
        unsigned int tid = GET_TID(rpl);

        f = lookup_tid(t, tid);
        if (!f) {
                dev_warn(adap, "%s: could not find filter entry: %u\n",
                         __func__, tid);
                return;
        }

        ctx = f->ctx;
        f->ctx = NULL;

        f->valid = 0;

        if (f->clipt)
                cxgbe_clip_release(f->dev, f->clipt);

        cxgbe_remove_tid(t, 0, tid, 0);
        t4_os_free(f);

        if (ctx) {
                ctx->result = 0;
                t4_complete(&ctx->completion);
        }
}