net/cxgbe: add flow actions to modify IP and TCP/UDP port
[dpdk.git] / drivers / net / cxgbe / cxgbe_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 #include <rte_net.h>
6 #include "common.h"
7 #include "t4_tcb.h"
8 #include "t4_regs.h"
9 #include "cxgbe_filter.h"
10 #include "clip_tbl.h"
11 #include "l2t.h"
12
13 /**
14  * Initialize Hash Filters
15  */
16 int init_hash_filter(struct adapter *adap)
17 {
18         unsigned int n_user_filters;
19         unsigned int user_filter_perc;
20         int ret;
21         u32 params[7], val[7];
22
23 #define FW_PARAM_DEV(param) \
24         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
25         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
26
27 #define FW_PARAM_PFVF(param) \
28         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
29         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) |  \
30         V_FW_PARAMS_PARAM_Y(0) | \
31         V_FW_PARAMS_PARAM_Z(0))
32
33         params[0] = FW_PARAM_DEV(NTID);
34         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
35                               params, val);
36         if (ret < 0)
37                 return ret;
38         adap->tids.ntids = val[0];
39         adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
40
41         user_filter_perc = 100;
42         n_user_filters = mult_frac(adap->tids.nftids,
43                                    user_filter_perc,
44                                    100);
45
46         adap->tids.nftids = n_user_filters;
47         adap->params.hash_filter = 1;
48         return 0;
49 }
50
51 /**
52  * Validate if the requested filter specification can be set by checking
53  * if the requested features have been enabled
54  */
55 int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
56 {
57         u32 fconf;
58
59         /*
60          * Check for unconfigured fields being used.
61          */
62         fconf = adapter->params.tp.vlan_pri_map;
63
64 #define S(_field) \
65         (fs->val._field || fs->mask._field)
66 #define U(_mask, _field) \
67         (!(fconf & (_mask)) && S(_field))
68
69         if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
70             U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx))
71                 return -EOPNOTSUPP;
72
73 #undef S
74 #undef U
75
76         /*
77          * If the user is requesting that the filter action loop
78          * matching packets back out one of our ports, make sure that
79          * the egress port is in range.
80          */
81         if (fs->action == FILTER_SWITCH &&
82             fs->eport >= adapter->params.nports)
83                 return -ERANGE;
84
85         /*
86          * Don't allow various trivially obvious bogus out-of-range
87          * values ...
88          */
89         if (fs->val.iport >= adapter->params.nports)
90                 return -ERANGE;
91
92         if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
93                 return -EOPNOTSUPP;
94
95         return 0;
96 }
97
98 /**
99  * Get the queue to which the traffic must be steered to.
100  */
101 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
102                                       struct ch_filter_specification *fs)
103 {
104         struct port_info *pi = ethdev2pinfo(dev);
105         struct adapter *adapter = pi->adapter;
106         unsigned int iq;
107
108         /*
109          * If the user has requested steering matching Ingress Packets
110          * to a specific Queue Set, we need to make sure it's in range
111          * for the port and map that into the Absolute Queue ID of the
112          * Queue Set's Response Queue.
113          */
114         if (!fs->dirsteer) {
115                 iq = 0;
116         } else {
117                 /*
118                  * If the iq id is greater than the number of qsets,
119                  * then assume it is an absolute qid.
120                  */
121                 if (fs->iq < pi->n_rx_qsets)
122                         iq = adapter->sge.ethrxq[pi->first_qset +
123                                                  fs->iq].rspq.abs_id;
124                 else
125                         iq = fs->iq;
126         }
127
128         return iq;
129 }
130
131 /* Return an error number if the indicated filter isn't writable ... */
132 int writable_filter(struct filter_entry *f)
133 {
134         if (f->locked)
135                 return -EPERM;
136         if (f->pending)
137                 return -EBUSY;
138
139         return 0;
140 }
141
142 /**
143  * Send CPL_SET_TCB_FIELD message
144  */
145 static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
146                           u16 word, u64 mask, u64 val, int no_reply)
147 {
148         struct rte_mbuf *mbuf;
149         struct cpl_set_tcb_field *req;
150         struct sge_ctrl_txq *ctrlq;
151
152         ctrlq = &adapter->sge.ctrlq[0];
153         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
154         WARN_ON(!mbuf);
155
156         mbuf->data_len = sizeof(*req);
157         mbuf->pkt_len = mbuf->data_len;
158
159         req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
160         memset(req, 0, sizeof(*req));
161         INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
162         req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
163                                       V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
164                                       V_NO_REPLY(no_reply));
165         req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
166         req->mask = cpu_to_be64(mask);
167         req->val = cpu_to_be64(val);
168
169         t4_mgmt_tx(ctrlq, mbuf);
170 }
171
172 /**
173  * Set one of the t_flags bits in the TCB.
174  */
175 static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
176                           unsigned int bit_pos, unsigned int val, int no_reply)
177 {
178         set_tcb_field(adap, ftid,  W_TCB_T_FLAGS, 1ULL << bit_pos,
179                       (unsigned long long)val << bit_pos, no_reply);
180 }
181
/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 *
 * @f: filter entry whose TID the request targets
 * @req: buffer to build the message in; must also have room for the
 *	 trailing NOOP sub-command written past the request
 * @word: TCB word to modify
 * @mask: bits of @word to modify
 * @val: new value for the masked bits
 * @cookie: opaque value placed in the request's cookie field
 * @no_reply: suppress the firmware reply when non-zero
 *
 * Layout written into @req: a ULP_TX_PKT header, a ULP_TX_SC_IMM
 * sub-command carrying the CPL_SET_TCB_FIELD request, then a
 * ULP_TX_SC_NOOP sub-command after the request.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
					struct cpl_set_tcb_field *req,
					unsigned int word,
					u64 mask, u64 val, u8 cookie,
					int no_reply)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* ULP TX lengths are expressed in 16-byte units. */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
				      V_QUEUENO(0));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	/* Trailing NOOP sub-command written immediately after the request. */
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
209
210 /**
211  * Check if entry already filled.
212  */
213 bool is_filter_set(struct tid_info *t, int fidx, int family)
214 {
215         bool result = FALSE;
216         int i, max;
217
218         /* IPv6 requires four slots and IPv4 requires only 1 slot.
219          * Ensure, there's enough slots available.
220          */
221         max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
222
223         t4_os_lock(&t->ftid_lock);
224         for (i = fidx; i <= max; i++) {
225                 if (rte_bitmap_get(t->ftid_bmap, i)) {
226                         result = TRUE;
227                         break;
228                 }
229         }
230         t4_os_unlock(&t->ftid_lock);
231         return result;
232 }
233
234 /**
235  * Allocate a available free entry
236  */
237 int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
238 {
239         struct tid_info *t = &adap->tids;
240         int pos;
241         int size = t->nftids;
242
243         t4_os_lock(&t->ftid_lock);
244         if (family == FILTER_TYPE_IPV6)
245                 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
246         else
247                 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
248         t4_os_unlock(&t->ftid_lock);
249
250         return pos < size ? pos : -1;
251 }
252
253 /**
254  * Construct hash filter ntuple.
255  */
256 static u64 hash_filter_ntuple(const struct filter_entry *f)
257 {
258         struct adapter *adap = ethdev2adap(f->dev);
259         struct tp_params *tp = &adap->params.tp;
260         u64 ntuple = 0;
261         u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
262
263         if (tp->port_shift >= 0)
264                 ntuple |= (u64)f->fs.mask.iport << tp->port_shift;
265
266         if (tp->protocol_shift >= 0) {
267                 if (!f->fs.val.proto)
268                         ntuple |= (u64)tcp_proto << tp->protocol_shift;
269                 else
270                         ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
271         }
272
273         if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
274                 ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
275         if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
276                 ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
277
278         if (ntuple != tp->hash_filter_mask)
279                 return 0;
280
281         return ntuple;
282 }
283
/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 *
 * @abort_req: buffer to build the message in; must also have room for
 *	       the trailing NOOP sub-command written past the request
 * @tid: TID to abort
 *
 * Layout written into @abort_req: a ULP_TX_PKT header, a ULP_TX_SC_IMM
 * sub-command carrying the CPL_ABORT_REQ, then a ULP_TX_SC_NOOP
 * sub-command.  The command byte is CPL_ABORT_NO_RST.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* ULP TX lengths are expressed in 16-byte units. */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_req) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	abort_req->rsvd0 = cpu_to_be32(0);
	abort_req->rsvd1 = 0;
	abort_req->cmd = CPL_ABORT_NO_RST;
	/* Trailing NOOP sub-command written immediately after the request. */
	sc = (struct ulptx_idata *)(abort_req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
307
/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 *
 * @abort_rpl: buffer to build the message in; must also have room for
 *	       the trailing NOOP sub-command written past the reply
 * @tid: TID the reply refers to
 *
 * Mirror image of mk_abort_req_ulp(): ULP_TX_PKT header, ULP_TX_SC_IMM
 * sub-command carrying the CPL_ABORT_RPL, then a ULP_TX_SC_NOOP
 * sub-command.  The command byte is CPL_ABORT_NO_RST.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* ULP TX lengths are expressed in 16-byte units. */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_rpl) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = cpu_to_be32(0);
	abort_rpl->rsvd1 = 0;
	abort_rpl->cmd = CPL_ABORT_NO_RST;
	/* Trailing NOOP sub-command written immediately after the reply. */
	sc = (struct ulptx_idata *)(abort_rpl + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
331
332 /**
333  * Delete the specified hash filter.
334  */
335 static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
336                                  unsigned int filter_id,
337                                  struct filter_ctx *ctx)
338 {
339         struct adapter *adapter = ethdev2adap(dev);
340         struct tid_info *t = &adapter->tids;
341         struct filter_entry *f;
342         struct sge_ctrl_txq *ctrlq;
343         unsigned int port_id = ethdev2pinfo(dev)->port_id;
344         int ret;
345
346         if (filter_id > adapter->tids.ntids)
347                 return -E2BIG;
348
349         f = lookup_tid(t, filter_id);
350         if (!f) {
351                 dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
352                         __func__, filter_id);
353                 return -EINVAL;
354         }
355
356         ret = writable_filter(f);
357         if (ret)
358                 return ret;
359
360         if (f->valid) {
361                 unsigned int wrlen;
362                 struct rte_mbuf *mbuf;
363                 struct work_request_hdr *wr;
364                 struct ulptx_idata *aligner;
365                 struct cpl_set_tcb_field *req;
366                 struct cpl_abort_req *abort_req;
367                 struct cpl_abort_rpl *abort_rpl;
368
369                 f->ctx = ctx;
370                 f->pending = 1;
371
372                 wrlen = cxgbe_roundup(sizeof(*wr) +
373                                       (sizeof(*req) + sizeof(*aligner)) +
374                                       sizeof(*abort_req) + sizeof(*abort_rpl),
375                                       16);
376
377                 ctrlq = &adapter->sge.ctrlq[port_id];
378                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
379                 if (!mbuf) {
380                         dev_err(adapter, "%s: could not allocate skb ..\n",
381                                 __func__);
382                         goto out_err;
383                 }
384
385                 mbuf->data_len = wrlen;
386                 mbuf->pkt_len = mbuf->data_len;
387
388                 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
389                 INIT_ULPTX_WR(req, wrlen, 0, 0);
390                 wr = (struct work_request_hdr *)req;
391                 wr++;
392                 req = (struct cpl_set_tcb_field *)wr;
393                 mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
394                                 V_TCB_RSS_INFO(M_TCB_RSS_INFO),
395                                 V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
396                                 0, 1);
397                 aligner = (struct ulptx_idata *)(req + 1);
398                 abort_req = (struct cpl_abort_req *)(aligner + 1);
399                 mk_abort_req_ulp(abort_req, f->tid);
400                 abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
401                 mk_abort_rpl_ulp(abort_rpl, f->tid);
402                 t4_mgmt_tx(ctrlq, mbuf);
403         }
404         return 0;
405
406 out_err:
407         return -ENOMEM;
408 }
409
/**
 * Build a ACT_OPEN_REQ6 message for setting IPv6 hash filter.
 *
 * @f: filter entry being installed
 * @mbuf: mbuf whose data area receives the request
 * @qid_filterid: combined queue-id/atid value placed in the TID field
 * @adap: adapter context, used for chip-revision dispatch
 *
 * Only the T6 request layout is handled; any other chip revision logs
 * an error and leaves the mbuf contents unwritten.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *req = NULL;
	u64 local_lo, local_hi, peer_lo, peer_hi;
	/* NOTE(review): casting the address byte arrays to u32 * assumes
	 * fs.val.lip/fip are 4-byte aligned within
	 * struct ch_filter_specification -- confirm the struct layout.
	 */
	u32 *lip = (u32 *)f->fs.val.lip;
	u32 *fip = (u32 *)f->fs.val.fip;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

	/* Assemble the 128-bit addresses as two 64-bit halves from the
	 * raw 32-bit words, without any byte swapping.
	 */
	local_hi = ((u64)lip[1]) << 32 | lip[0];
	local_lo = ((u64)lip[3]) << 32 | lip[2];
	peer_hi = ((u64)fip[1]) << 32 | fip[0];
	peer_lo = ((u64)fip[3]) << 32 | fip[2];

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = local_hi;
	req->local_ip_lo = local_lo;
	req->peer_ip_hi = peer_hi;
	req->peer_ip_lo = peer_lo;
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	/* CONG_CNTRL bit0 encodes drop, bit1 encodes direct steering. */
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
			    V_RSS_QUEUE(f->fs.iq) |
			    F_T5_OPT_2_VALID |
			    F_RX_CHANNEL |
			    V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					 (f->fs.dirsteer << 1)) |
			    V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
463
464 /**
465  * Build a ACT_OPEN_REQ message for setting IPv4 hash filter.
466  */
467 static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
468                             unsigned int qid_filterid, struct adapter *adap)
469 {
470         struct cpl_t6_act_open_req *req = NULL;
471
472         switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
473         case CHELSIO_T6:
474                 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
475
476                 INIT_TP_WR(req, 0);
477                 break;
478         default:
479                 dev_err(adap, "%s: unsupported chip type!\n", __func__);
480                 return;
481         }
482
483         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
484                                                     qid_filterid));
485         req->local_port = cpu_to_be16(f->fs.val.lport);
486         req->peer_port = cpu_to_be16(f->fs.val.fport);
487         req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
488                         f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
489         req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
490                         f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
491         req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
492                                         f->fs.newvlan == VLAN_REWRITE) |
493                                 V_DELACK(f->fs.hitcnts) |
494                                 V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
495                                 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
496                                            << 1) |
497                                 V_TX_CHAN(f->fs.eport) |
498                                 V_ULP_MODE(ULP_MODE_NONE) |
499                                 F_TCAM_BYPASS | F_NON_OFFLOAD);
500         req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
501         req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
502                             V_RSS_QUEUE(f->fs.iq) |
503                             F_T5_OPT_2_VALID |
504                             F_RX_CHANNEL |
505                             V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
506                                          (f->fs.dirsteer << 1)) |
507                             V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
508 }
509
510 /**
511  * Set the specified hash filter.
512  */
513 static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
514                                  struct ch_filter_specification *fs,
515                                  struct filter_ctx *ctx)
516 {
517         struct port_info *pi = ethdev2pinfo(dev);
518         struct adapter *adapter = pi->adapter;
519         struct tid_info *t = &adapter->tids;
520         struct filter_entry *f;
521         struct rte_mbuf *mbuf;
522         struct sge_ctrl_txq *ctrlq;
523         unsigned int iq;
524         int atid, size;
525         int ret = 0;
526
527         ret = validate_filter(adapter, fs);
528         if (ret)
529                 return ret;
530
531         iq = get_filter_steerq(dev, fs);
532
533         ctrlq = &adapter->sge.ctrlq[pi->port_id];
534
535         f = t4_os_alloc(sizeof(*f));
536         if (!f)
537                 goto out_err;
538
539         f->fs = *fs;
540         f->ctx = ctx;
541         f->dev = dev;
542         f->fs.iq = iq;
543
544         /*
545          * If the new filter requires loopback Destination MAC and/or VLAN
546          * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
547          * the filter.
548          */
549         if (f->fs.newvlan == VLAN_INSERT ||
550             f->fs.newvlan == VLAN_REWRITE) {
551                 /* allocate L2T entry for new filter */
552                 f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
553                                                    f->fs.eport, f->fs.dmac);
554                 if (!f->l2t) {
555                         ret = -ENOMEM;
556                         goto out_err;
557                 }
558         }
559
560         atid = cxgbe_alloc_atid(t, f);
561         if (atid < 0)
562                 goto out_err;
563
564         if (f->fs.type) {
565                 /* IPv6 hash filter */
566                 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
567                 if (!f->clipt)
568                         goto free_atid;
569
570                 size = sizeof(struct cpl_t6_act_open_req6);
571                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
572                 if (!mbuf) {
573                         ret = -ENOMEM;
574                         goto free_clip;
575                 }
576
577                 mbuf->data_len = size;
578                 mbuf->pkt_len = mbuf->data_len;
579
580                 mk_act_open_req6(f, mbuf,
581                                  ((adapter->sge.fw_evtq.abs_id << 14) | atid),
582                                  adapter);
583         } else {
584                 /* IPv4 hash filter */
585                 size = sizeof(struct cpl_t6_act_open_req);
586                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
587                 if (!mbuf) {
588                         ret = -ENOMEM;
589                         goto free_atid;
590                 }
591
592                 mbuf->data_len = size;
593                 mbuf->pkt_len = mbuf->data_len;
594
595                 mk_act_open_req(f, mbuf,
596                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
597                                 adapter);
598         }
599
600         f->pending = 1;
601         t4_mgmt_tx(ctrlq, mbuf);
602         return 0;
603
604 free_clip:
605         cxgbe_clip_release(f->dev, f->clipt);
606 free_atid:
607         cxgbe_free_atid(t, atid);
608
609 out_err:
610         t4_os_free(f);
611         return ret;
612 }
613
614 /**
615  * Clear a filter and release any of its resources that we own.  This also
616  * clears the filter's "pending" status.
617  */
618 void clear_filter(struct filter_entry *f)
619 {
620         if (f->clipt)
621                 cxgbe_clip_release(f->dev, f->clipt);
622
623         /*
624          * The zeroing of the filter rule below clears the filter valid,
625          * pending, locked flags etc. so it's all we need for
626          * this operation.
627          */
628         memset(f, 0, sizeof(*f));
629 }
630
631 /**
632  * t4_mk_filtdelwr - create a delete filter WR
633  * @adap: adapter context
634  * @ftid: the filter ID
635  * @wr: the filter work request to populate
636  * @qid: ingress queue to receive the delete notification
637  *
638  * Creates a filter work request to delete the supplied filter.  If @qid is
639  * negative the delete notification is suppressed.
640  */
641 static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
642                             struct fw_filter2_wr *wr, int qid)
643 {
644         memset(wr, 0, sizeof(*wr));
645         if (adap->params.filter2_wr_support)
646                 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
647         else
648                 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
649         wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
650         wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
651                                     V_FW_FILTER_WR_NOREPLY(qid < 0));
652         wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
653         if (qid >= 0)
654                 wr->rx_chan_rx_rpl_iq =
655                                 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
656 }
657
658 /**
659  * Create FW work request to delete the filter at a specified index
660  */
661 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
662 {
663         struct adapter *adapter = ethdev2adap(dev);
664         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
665         struct rte_mbuf *mbuf;
666         struct fw_filter2_wr *fwr;
667         struct sge_ctrl_txq *ctrlq;
668         unsigned int port_id = ethdev2pinfo(dev)->port_id;
669
670         ctrlq = &adapter->sge.ctrlq[port_id];
671         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
672         if (!mbuf)
673                 return -ENOMEM;
674
675         mbuf->data_len = sizeof(*fwr);
676         mbuf->pkt_len = mbuf->data_len;
677
678         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
679         t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);
680
681         /*
682          * Mark the filter as "pending" and ship off the Filter Work Request.
683          * When we get the Work Request Reply we'll clear the pending status.
684          */
685         f->pending = 1;
686         t4_mgmt_tx(ctrlq, mbuf);
687         return 0;
688 }
689
/**
 * Construct and send the FW_FILTER_WR (or FW_FILTER2_WR) work request
 * that programs LE-TCAM filter @fidx with its full match and action
 * specification.
 *
 * @dev: ethernet device the filter belongs to
 * @fidx: index into the adapter's ftid table
 *
 * Returns 0 once the request is shipped; -ENOMEM when the L2T entry
 * (for a VLAN-rewriting filter) or the control mbuf cannot be
 * allocated.
 */
int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct rte_mbuf *mbuf;
	struct fw_filter2_wr *fwr;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	/*
	 * If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
						   f->fs.eport, f->fs.dmac);
		if (!f->l2t)
			return -ENOMEM;
	}

	ctrlq = &adapter->sge.ctrlq[port_id];
	mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
	if (!mbuf) {
		/* NOTE(review): an L2T entry allocated above is not
		 * released on this path -- confirm whether that leaks.
		 */
		ret = -ENOMEM;
		goto out;
	}

	mbuf->data_len = sizeof(*fwr);
	mbuf->pkt_len = mbuf->data_len;

	fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
	memset(fwr, 0, sizeof(*fwr));

	/*
	 * Construct the work request to set the filter.
	 */
	if (adapter->params.filter2_wr_support)
		fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
	else
		fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
	fwr->tid_to_iq =
		cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
			    V_FW_FILTER_WR_RQTYPE(f->fs.type) |
			    V_FW_FILTER_WR_NOREPLY(0) |
			    V_FW_FILTER_WR_IQ(f->fs.iq));
	/* Action flags: drop/steer/switch plus VLAN insert/remove bits
	 * (VLAN_REWRITE sets both insert and remove).
	 */
	fwr->del_filter_to_l2tix =
		cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
			    V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
			    V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
			    V_FW_FILTER_WR_INSVLAN
				(f->fs.newvlan == VLAN_INSERT ||
				 f->fs.newvlan == VLAN_REWRITE) |
			    V_FW_FILTER_WR_RMVLAN
				(f->fs.newvlan == VLAN_REMOVE ||
				 f->fs.newvlan == VLAN_REWRITE) |
			    V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
			    V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
			    V_FW_FILTER_WR_PRIO(f->fs.prio) |
			    V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
	fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
	fwr->smac_sel = 0;
	/* Replies are steered to the FW event queue. */
	fwr->rx_chan_rx_rpl_iq =
		cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
			    V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
						     ));
	/* Match tuple: MAC index, ingress port, protocol, IPs, L4 ports. */
	fwr->maci_to_matchtypem =
		cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
			    V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
			    V_FW_FILTER_WR_PORT(f->fs.val.iport) |
			    V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = cpu_to_be16(f->fs.val.lport);
	fwr->lpm = cpu_to_be16(f->fs.mask.lport);
	fwr->fp = cpu_to_be16(f->fs.val.fport);
	fwr->fpm = cpu_to_be16(f->fs.mask.fport);

	/* NAT rewrite fields only exist in the FW_FILTER2_WR layout. */
	if (adapter->params.filter2_wr_support && f->fs.nat_mode) {
		fwr->natmode_to_ulp_type =
			V_FW_FILTER2_WR_ULP_TYPE(ULP_MODE_TCPDDP) |
			V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
		fwr->newlport = cpu_to_be16(f->fs.nat_lport);
		fwr->newfport = cpu_to_be16(f->fs.nat_fport);
	}

	/*
	 * Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(ctrlq, mbuf);
	return 0;

out:
	return ret;
}
797
798 /**
799  * Set the corresponding entry in the bitmap. 4 slots are
800  * marked for IPv6, whereas only 1 slot is marked for IPv4.
801  */
802 static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
803 {
804         t4_os_lock(&t->ftid_lock);
805         if (rte_bitmap_get(t->ftid_bmap, fidx)) {
806                 t4_os_unlock(&t->ftid_lock);
807                 return -EBUSY;
808         }
809
810         if (family == FILTER_TYPE_IPV4) {
811                 rte_bitmap_set(t->ftid_bmap, fidx);
812         } else {
813                 rte_bitmap_set(t->ftid_bmap, fidx);
814                 rte_bitmap_set(t->ftid_bmap, fidx + 1);
815                 rte_bitmap_set(t->ftid_bmap, fidx + 2);
816                 rte_bitmap_set(t->ftid_bmap, fidx + 3);
817         }
818         t4_os_unlock(&t->ftid_lock);
819         return 0;
820 }
821
822 /**
823  * Clear the corresponding entry in the bitmap. 4 slots are
824  * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
825  */
826 static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
827 {
828         t4_os_lock(&t->ftid_lock);
829         if (family == FILTER_TYPE_IPV4) {
830                 rte_bitmap_clear(t->ftid_bmap, fidx);
831         } else {
832                 rte_bitmap_clear(t->ftid_bmap, fidx);
833                 rte_bitmap_clear(t->ftid_bmap, fidx + 1);
834                 rte_bitmap_clear(t->ftid_bmap, fidx + 2);
835                 rte_bitmap_clear(t->ftid_bmap, fidx + 3);
836         }
837         t4_os_unlock(&t->ftid_lock);
838 }
839
840 /**
841  * Check a delete filter request for validity and send it to the hardware.
842  * Return 0 on success, an error number otherwise.  We attach any provided
843  * filter operation context to the internal filter specification in order to
844  * facilitate signaling completion of the operation.
845  */
846 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
847                      struct ch_filter_specification *fs,
848                      struct filter_ctx *ctx)
849 {
850         struct port_info *pi = (struct port_info *)(dev->data->dev_private);
851         struct adapter *adapter = pi->adapter;
852         struct filter_entry *f;
853         unsigned int chip_ver;
854         int ret;
855
856         if (is_hashfilter(adapter) && fs->cap)
857                 return cxgbe_del_hash_filter(dev, filter_id, ctx);
858
859         if (filter_id >= adapter->tids.nftids)
860                 return -ERANGE;
861
862         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
863
864         ret = is_filter_set(&adapter->tids, filter_id, fs->type);
865         if (!ret) {
866                 dev_warn(adap, "%s: could not find filter entry: %u\n",
867                          __func__, filter_id);
868                 return -EINVAL;
869         }
870
871         /*
872          * Ensure filter id is aligned on the 2 slot boundary for T6,
873          * and 4 slot boundary for cards below T6.
874          */
875         if (fs->type) {
876                 if (chip_ver < CHELSIO_T6)
877                         filter_id &= ~(0x3);
878                 else
879                         filter_id &= ~(0x1);
880         }
881
882         f = &adapter->tids.ftid_tab[filter_id];
883         ret = writable_filter(f);
884         if (ret)
885                 return ret;
886
887         if (f->valid) {
888                 f->ctx = ctx;
889                 cxgbe_clear_ftid(&adapter->tids,
890                                  f->tid - adapter->tids.ftid_base,
891                                  f->fs.type ? FILTER_TYPE_IPV6 :
892                                               FILTER_TYPE_IPV4);
893                 return del_filter_wr(dev, filter_id);
894         }
895
896         /*
897          * If the caller has passed in a Completion Context then we need to
898          * mark it as a successful completion so they don't stall waiting
899          * for it.
900          */
901         if (ctx) {
902                 ctx->result = 0;
903                 t4_complete(&ctx->completion);
904         }
905
906         return 0;
907 }
908
909 /**
910  * Check a Chelsio Filter Request for validity, convert it into our internal
911  * format and send it to the hardware.  Return 0 on success, an error number
912  * otherwise.  We attach any provided filter operation context to the internal
913  * filter specification in order to facilitate signaling completion of the
914  * operation.
915  */
916 int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
917                      struct ch_filter_specification *fs,
918                      struct filter_ctx *ctx)
919 {
920         struct port_info *pi = ethdev2pinfo(dev);
921         struct adapter *adapter = pi->adapter;
922         unsigned int fidx, iq, fid_bit = 0;
923         struct filter_entry *f;
924         unsigned int chip_ver;
925         uint8_t bitoff[16] = {0};
926         int ret;
927
928         if (is_hashfilter(adapter) && fs->cap)
929                 return cxgbe_set_hash_filter(dev, fs, ctx);
930
931         if (filter_id >= adapter->tids.nftids)
932                 return -ERANGE;
933
934         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
935
936         ret = validate_filter(adapter, fs);
937         if (ret)
938                 return ret;
939
940         /*
941          * Ensure filter id is aligned on the 4 slot boundary for IPv6
942          * maskfull filters.
943          */
944         if (fs->type)
945                 filter_id &= ~(0x3);
946
947         ret = is_filter_set(&adapter->tids, filter_id, fs->type);
948         if (ret)
949                 return -EBUSY;
950
951         iq = get_filter_steerq(dev, fs);
952
953         /*
954          * IPv6 filters occupy four slots and must be aligned on four-slot
955          * boundaries for T5. On T6, IPv6 filters occupy two-slots and
956          * must be aligned on two-slot boundaries.
957          *
958          * IPv4 filters only occupy a single slot and have no alignment
959          * requirements but writing a new IPv4 filter into the middle
960          * of an existing IPv6 filter requires clearing the old IPv6
961          * filter.
962          */
963         if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
964                 /*
965                  * For T6, If our IPv4 filter isn't being written to a
966                  * multiple of two filter index and there's an IPv6
967                  * filter at the multiple of 2 base slot, then we need
968                  * to delete that IPv6 filter ...
969                  * For adapters below T6, IPv6 filter occupies 4 entries.
970                  */
971                 if (chip_ver < CHELSIO_T6)
972                         fidx = filter_id & ~0x3;
973                 else
974                         fidx = filter_id & ~0x1;
975
976                 if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
977                         f = &adapter->tids.ftid_tab[fidx];
978                         if (f->valid)
979                                 return -EBUSY;
980                 }
981         } else { /* IPv6 */
982                 unsigned int max_filter_id;
983
984                 if (chip_ver < CHELSIO_T6) {
985                         /*
986                          * Ensure that the IPv6 filter is aligned on a
987                          * multiple of 4 boundary.
988                          */
989                         if (filter_id & 0x3)
990                                 return -EINVAL;
991
992                         max_filter_id = filter_id + 4;
993                 } else {
994                         /*
995                          * For T6, CLIP being enabled, IPv6 filter would occupy
996                          * 2 entries.
997                          */
998                         if (filter_id & 0x1)
999                                 return -EINVAL;
1000
1001                         max_filter_id = filter_id + 2;
1002                 }
1003
1004                 /*
1005                  * Check all except the base overlapping IPv4 filter
1006                  * slots.
1007                  */
1008                 for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
1009                         f = &adapter->tids.ftid_tab[fidx];
1010                         if (f->valid)
1011                                 return -EBUSY;
1012                 }
1013         }
1014
1015         /*
1016          * Check to make sure that provided filter index is not
1017          * already in use by someone else
1018          */
1019         f = &adapter->tids.ftid_tab[filter_id];
1020         if (f->valid)
1021                 return -EBUSY;
1022
1023         fidx = adapter->tids.ftid_base + filter_id;
1024         fid_bit = filter_id;
1025         ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
1026                              fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
1027         if (ret)
1028                 return ret;
1029
1030         /*
1031          * Check to make sure the filter requested is writable ...
1032          */
1033         ret = writable_filter(f);
1034         if (ret) {
1035                 /* Clear the bits we have set above */
1036                 cxgbe_clear_ftid(&adapter->tids, fid_bit,
1037                                  fs->type ? FILTER_TYPE_IPV6 :
1038                                             FILTER_TYPE_IPV4);
1039                 return ret;
1040         }
1041
1042         /*
1043          * Allocate a clip table entry only if we have non-zero IPv6 address
1044          */
1045         if (chip_ver > CHELSIO_T5 && fs->type &&
1046             memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
1047                 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
1048                 if (!f->clipt)
1049                         goto free_tid;
1050         }
1051
1052         /*
1053          * Convert the filter specification into our internal format.
1054          * We copy the PF/VF specification into the Outer VLAN field
1055          * here so the rest of the code -- including the interface to
1056          * the firmware -- doesn't have to constantly do these checks.
1057          */
1058         f->fs = *fs;
1059         f->fs.iq = iq;
1060         f->dev = dev;
1061
1062         /*
1063          * Attempt to set the filter.  If we don't succeed, we clear
1064          * it and return the failure.
1065          */
1066         f->ctx = ctx;
1067         f->tid = fidx; /* Save the actual tid */
1068         ret = set_filter_wr(dev, filter_id);
1069         if (ret) {
1070                 fid_bit = f->tid - adapter->tids.ftid_base;
1071                 goto free_tid;
1072         }
1073
1074         return ret;
1075
1076 free_tid:
1077         cxgbe_clear_ftid(&adapter->tids, fid_bit,
1078                          fs->type ? FILTER_TYPE_IPV6 :
1079                                     FILTER_TYPE_IPV4);
1080         clear_filter(f);
1081         return ret;
1082 }
1083
1084 /**
1085  * Handle a Hash filter write reply.
1086  */
1087 void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
1088 {
1089         struct tid_info *t = &adap->tids;
1090         struct filter_entry *f;
1091         struct filter_ctx *ctx = NULL;
1092         unsigned int tid = GET_TID(rpl);
1093         unsigned int ftid = G_TID_TID(G_AOPEN_ATID
1094                                       (be32_to_cpu(rpl->atid_status)));
1095         unsigned int status  = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
1096
1097         f = lookup_atid(t, ftid);
1098         if (!f) {
1099                 dev_warn(adap, "%s: could not find filter entry: %d\n",
1100                          __func__, ftid);
1101                 return;
1102         }
1103
1104         ctx = f->ctx;
1105         f->ctx = NULL;
1106
1107         switch (status) {
1108         case CPL_ERR_NONE: {
1109                 f->tid = tid;
1110                 f->pending = 0;  /* asynchronous setup completed */
1111                 f->valid = 1;
1112
1113                 cxgbe_insert_tid(t, f, f->tid, 0);
1114                 cxgbe_free_atid(t, ftid);
1115                 if (ctx) {
1116                         ctx->tid = f->tid;
1117                         ctx->result = 0;
1118                 }
1119                 if (f->fs.hitcnts)
1120                         set_tcb_field(adap, tid,
1121                                       W_TCB_TIMESTAMP,
1122                                       V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
1123                                       V_TCB_T_RTT_TS_RECENT_AGE
1124                                               (M_TCB_T_RTT_TS_RECENT_AGE),
1125                                       V_TCB_TIMESTAMP(0ULL) |
1126                                       V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
1127                                       1);
1128                 if (f->fs.newvlan == VLAN_INSERT ||
1129                     f->fs.newvlan == VLAN_REWRITE)
1130                         set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
1131                 break;
1132         }
1133         default:
1134                 dev_warn(adap, "%s: filter creation failed with status = %u\n",
1135                          __func__, status);
1136
1137                 if (ctx) {
1138                         if (status == CPL_ERR_TCAM_FULL)
1139                                 ctx->result = -EAGAIN;
1140                         else
1141                                 ctx->result = -EINVAL;
1142                 }
1143
1144                 cxgbe_free_atid(t, ftid);
1145                 t4_os_free(f);
1146         }
1147
1148         if (ctx)
1149                 t4_complete(&ctx->completion);
1150 }
1151
1152 /**
1153  * Handle a LE-TCAM filter write/deletion reply.
1154  */
1155 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1156 {
1157         struct filter_entry *f = NULL;
1158         unsigned int tid = GET_TID(rpl);
1159         int idx, max_fidx = adap->tids.nftids;
1160
1161         /* Get the corresponding filter entry for this tid */
1162         if (adap->tids.ftid_tab) {
1163                 /* Check this in normal filter region */
1164                 idx = tid - adap->tids.ftid_base;
1165                 if (idx >= max_fidx)
1166                         return;
1167
1168                 f = &adap->tids.ftid_tab[idx];
1169                 if (f->tid != tid)
1170                         return;
1171         }
1172
1173         /* We found the filter entry for this tid */
1174         if (f) {
1175                 unsigned int ret = G_COOKIE(rpl->cookie);
1176                 struct filter_ctx *ctx;
1177
1178                 /*
1179                  * Pull off any filter operation context attached to the
1180                  * filter.
1181                  */
1182                 ctx = f->ctx;
1183                 f->ctx = NULL;
1184
1185                 if (ret == FW_FILTER_WR_FLT_ADDED) {
1186                         f->pending = 0;  /* asynchronous setup completed */
1187                         f->valid = 1;
1188                         if (ctx) {
1189                                 ctx->tid = f->tid;
1190                                 ctx->result = 0;
1191                         }
1192                 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
1193                         /*
1194                          * Clear the filter when we get confirmation from the
1195                          * hardware that the filter has been deleted.
1196                          */
1197                         clear_filter(f);
1198                         if (ctx)
1199                                 ctx->result = 0;
1200                 } else {
1201                         /*
1202                          * Something went wrong.  Issue a warning about the
1203                          * problem and clear everything out.
1204                          */
1205                         dev_warn(adap, "filter %u setup failed with error %u\n",
1206                                  idx, ret);
1207                         clear_filter(f);
1208                         if (ctx)
1209                                 ctx->result = -EINVAL;
1210                 }
1211
1212                 if (ctx)
1213                         t4_complete(&ctx->completion);
1214         }
1215 }
1216
1217 /*
1218  * Retrieve the packet count for the specified filter.
1219  */
1220 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
1221                            u64 *c, int hash, bool get_byte)
1222 {
1223         struct filter_entry *f;
1224         unsigned int tcb_base, tcbaddr;
1225         int ret;
1226
1227         tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
1228         if (is_hashfilter(adapter) && hash) {
1229                 if (fidx < adapter->tids.ntids) {
1230                         f = adapter->tids.tid_tab[fidx];
1231                         if (!f)
1232                                 return -EINVAL;
1233
1234                         if (is_t5(adapter->params.chip)) {
1235                                 *c = 0;
1236                                 return 0;
1237                         }
1238                         tcbaddr = tcb_base + (fidx * TCB_SIZE);
1239                         goto get_count;
1240                 } else {
1241                         return -ERANGE;
1242                 }
1243         } else {
1244                 if (fidx >= adapter->tids.nftids)
1245                         return -ERANGE;
1246
1247                 f = &adapter->tids.ftid_tab[fidx];
1248                 if (!f->valid)
1249                         return -EINVAL;
1250
1251                 tcbaddr = tcb_base + f->tid * TCB_SIZE;
1252         }
1253
1254         f = &adapter->tids.ftid_tab[fidx];
1255         if (!f->valid)
1256                 return -EINVAL;
1257
1258 get_count:
1259         if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
1260                 /*
1261                  * For T5, the Filter Packet Hit Count is maintained as a
1262                  * 32-bit Big Endian value in the TCB field {timestamp}.
1263                  * Similar to the craziness above, instead of the filter hit
1264                  * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
1265                  * sizeof(u32)), it actually shows up at offset 24.  Whacky.
1266                  */
1267                 if (get_byte) {
1268                         unsigned int word_offset = 4;
1269                         __be64 be64_byte_count;
1270
1271                         t4_os_lock(&adapter->win0_lock);
1272                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1273                                            tcbaddr +
1274                                            (word_offset * sizeof(__be32)),
1275                                            sizeof(be64_byte_count),
1276                                            &be64_byte_count,
1277                                            T4_MEMORY_READ);
1278                         t4_os_unlock(&adapter->win0_lock);
1279                         if (ret < 0)
1280                                 return ret;
1281                         *c = be64_to_cpu(be64_byte_count);
1282                 } else {
1283                         unsigned int word_offset = 6;
1284                         __be32 be32_count;
1285
1286                         t4_os_lock(&adapter->win0_lock);
1287                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1288                                            tcbaddr +
1289                                            (word_offset * sizeof(__be32)),
1290                                            sizeof(be32_count), &be32_count,
1291                                            T4_MEMORY_READ);
1292                         t4_os_unlock(&adapter->win0_lock);
1293                         if (ret < 0)
1294                                 return ret;
1295                         *c = (u64)be32_to_cpu(be32_count);
1296                 }
1297         }
1298         return 0;
1299 }
1300
1301 /**
1302  * Handle a Hash filter delete reply.
1303  */
1304 void hash_del_filter_rpl(struct adapter *adap,
1305                          const struct cpl_abort_rpl_rss *rpl)
1306 {
1307         struct tid_info *t = &adap->tids;
1308         struct filter_entry *f;
1309         struct filter_ctx *ctx = NULL;
1310         unsigned int tid = GET_TID(rpl);
1311
1312         f = lookup_tid(t, tid);
1313         if (!f) {
1314                 dev_warn(adap, "%s: could not find filter entry: %u\n",
1315                          __func__, tid);
1316                 return;
1317         }
1318
1319         ctx = f->ctx;
1320         f->ctx = NULL;
1321
1322         f->valid = 0;
1323
1324         if (f->clipt)
1325                 cxgbe_clip_release(f->dev, f->clipt);
1326
1327         cxgbe_remove_tid(t, 0, tid, 0);
1328         t4_os_free(f);
1329
1330         if (ctx) {
1331                 ctx->result = 0;
1332                 t4_complete(&ctx->completion);
1333         }
1334 }