06233e41e735a67f4b26d0d23f8ec42a32ef986e
[dpdk.git] / drivers / net / cxgbe / cxgbe_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 #include <rte_net.h>
6
7 #include "base/common.h"
8 #include "base/t4_tcb.h"
9 #include "base/t4_regs.h"
10 #include "cxgbe_filter.h"
11 #include "clip_tbl.h"
12 #include "l2t.h"
13 #include "smt.h"
14
/**
 * Initialize Hash Filters
 */
int cxgbe_init_hash_filter(struct adapter *adap)
{
	unsigned int n_user_filters;
	unsigned int user_filter_perc;
	int ret;
	u32 params[7], val[7];

#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) |  \
	V_FW_PARAMS_PARAM_Y(0) | \
	V_FW_PARAMS_PARAM_Z(0))

	/* Ask firmware for the total number of TIDs available. */
	params[0] = FW_PARAM_DEV(NTID);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      params, val);
	if (ret < 0)
		return ret;
	adap->tids.ntids = val[0];
	/* Active TIDs get at most half the TID space, capped at MAX_ATIDS. */
	adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

	/* All filter TIDs are currently given to user filters:
	 * user_filter_perc is fixed at 100%, so nftids is unchanged.
	 */
	user_filter_perc = 100;
	n_user_filters = mult_frac(adap->tids.nftids,
				   user_filter_perc,
				   100);

	adap->tids.nftids = n_user_filters;
	adap->params.hash_filter = 1;
	return 0;
}
52
/**
 * Validate if the requested filter specification can be set by checking
 * if the requested features have been enabled
 */
int cxgbe_validate_filter(struct adapter *adapter,
			  struct ch_filter_specification *fs)
{
	u32 fconf, iconf;

	/*
	 * Check for unconfigured fields being used.
	 */
	/* Hash (cap) filters are validated against the full filter mask,
	 * TCAM filters against the compressed filter tuple config.
	 */
	fconf = fs->cap ? adapter->params.tp.filter_mask :
			  adapter->params.tp.vlan_pri_map;

	iconf = adapter->params.tp.ingress_config;

/* S(field): the field is being matched on (value or mask set).
 * U(mask, field): the field is used but its tuple slot is not enabled
 * in hardware.
 */
#define S(_field) \
	(fs->val._field || fs->mask._field)
#define U(_mask, _field) \
	(!(fconf & (_mask)) && S(_field))

	if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
	    U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
	    U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) ||
	    U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld))
		return -EOPNOTSUPP;

	/* Either OVLAN or PFVF match is enabled in hardware, but not both */
	if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
	    (S(ovlan_vld) && (iconf & F_VNIC)))
		return -EOPNOTSUPP;

	/* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */
	if ((S(ovlan_vld) && (iconf & F_USE_ENC_IDX)) ||
	    (S(pfvf_vld) && (iconf & F_USE_ENC_IDX)))
		return -EOPNOTSUPP;

#undef S
#undef U

	/*
	 * If the user is requesting that the filter action loop
	 * matching packets back out one of our ports, make sure that
	 * the egress port is in range.
	 */
	if (fs->action == FILTER_SWITCH &&
	    fs->eport >= adapter->params.nports)
		return -ERANGE;

	/*
	 * Don't allow various trivially obvious bogus out-of-range
	 * values ...
	 */
	if (fs->val.iport >= adapter->params.nports)
		return -ERANGE;

	/* NAT and source-MAC swap on TCAM filters require FW_FILTER2_WR
	 * support in the firmware.
	 */
	if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
		return -EOPNOTSUPP;

	if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
		return -EOPNOTSUPP;

	return 0;
}
118
119 /**
120  * Get the queue to which the traffic must be steered to.
121  */
122 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
123                                       struct ch_filter_specification *fs)
124 {
125         struct port_info *pi = ethdev2pinfo(dev);
126         struct adapter *adapter = pi->adapter;
127         unsigned int iq;
128
129         /*
130          * If the user has requested steering matching Ingress Packets
131          * to a specific Queue Set, we need to make sure it's in range
132          * for the port and map that into the Absolute Queue ID of the
133          * Queue Set's Response Queue.
134          */
135         if (!fs->dirsteer) {
136                 iq = 0;
137         } else {
138                 /*
139                  * If the iq id is greater than the number of qsets,
140                  * then assume it is an absolute qid.
141                  */
142                 if (fs->iq < pi->n_rx_qsets)
143                         iq = adapter->sge.ethrxq[pi->first_qset +
144                                                  fs->iq].rspq.abs_id;
145                 else
146                         iq = fs->iq;
147         }
148
149         return iq;
150 }
151
152 /* Return an error number if the indicated filter isn't writable ... */
153 static int writable_filter(struct filter_entry *f)
154 {
155         if (f->locked)
156                 return -EPERM;
157         if (f->pending)
158                 return -EBUSY;
159
160         return 0;
161 }
162
163 /**
164  * Send CPL_SET_TCB_FIELD message
165  */
166 static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
167                           u16 word, u64 mask, u64 val, int no_reply)
168 {
169         struct rte_mbuf *mbuf;
170         struct cpl_set_tcb_field *req;
171         struct sge_ctrl_txq *ctrlq;
172
173         ctrlq = &adapter->sge.ctrlq[0];
174         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
175         WARN_ON(!mbuf);
176
177         mbuf->data_len = sizeof(*req);
178         mbuf->pkt_len = mbuf->data_len;
179
180         req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
181         memset(req, 0, sizeof(*req));
182         INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
183         req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
184                                       V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
185                                       V_NO_REPLY(no_reply));
186         req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
187         req->mask = cpu_to_be64(mask);
188         req->val = cpu_to_be64(val);
189
190         t4_mgmt_tx(ctrlq, mbuf);
191 }
192
193 /**
194  * Set one of the t_flags bits in the TCB.
195  */
196 static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
197                           unsigned int bit_pos, unsigned int val, int no_reply)
198 {
199         set_tcb_field(adap, ftid,  W_TCB_T_FLAGS, 1ULL << bit_pos,
200                       (unsigned long long)val << bit_pos, no_reply);
201 }
202
/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
					struct cpl_set_tcb_field *req,
					unsigned int word,
					u64 mask, u64 val, u8 cookie,
					int no_reply)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	/* ULP_TX_PKT header followed by an immediate-data sub-command
	 * that carries the CPL message inline.
	 */
	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
				      V_QUEUENO(0));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	/* Trailing no-op sub-command pads the request to a 16-byte
	 * boundary.
	 */
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
230
231 /**
232  * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
233  * IPv4 requires only 1 slot on all cards.
234  */
235 u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
236 {
237         if (family == FILTER_TYPE_IPV6) {
238                 if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
239                         return 4;
240
241                 return 2;
242         }
243
244         return 1;
245 }
246
247 /**
248  * Check if entries are already filled.
249  */
250 bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
251 {
252         bool result = FALSE;
253         u32 i;
254
255         /* Ensure there's enough slots available. */
256         t4_os_lock(&t->ftid_lock);
257         for (i = fidx; i < fidx + nentries; i++) {
258                 if (rte_bitmap_get(t->ftid_bmap, i)) {
259                         result = TRUE;
260                         break;
261                 }
262         }
263         t4_os_unlock(&t->ftid_lock);
264         return result;
265 }
266
267 /**
268  * Allocate available free entries.
269  */
270 int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
271 {
272         struct tid_info *t = &adap->tids;
273         int pos;
274         int size = t->nftids;
275
276         t4_os_lock(&t->ftid_lock);
277         if (nentries > 1)
278                 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
279                                                     nentries);
280         else
281                 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
282         t4_os_unlock(&t->ftid_lock);
283
284         return pos < size ? pos : -1;
285 }
286
287 /**
288  * Clear a filter and release any of its resources that we own.  This also
289  * clears the filter's "pending" status.
290  */
291 static void clear_filter(struct filter_entry *f)
292 {
293         if (f->clipt)
294                 cxgbe_clip_release(f->dev, f->clipt);
295
296         if (f->l2t)
297                 cxgbe_l2t_release(f->l2t);
298
299         /* The zeroing of the filter rule below clears the filter valid,
300          * pending, locked flags etc. so it's all we need for
301          * this operation.
302          */
303         memset(f, 0, sizeof(*f));
304 }
305
/**
 * Construct hash filter ntuple.
 *
 * Packs the filter's match values into the hardware filter tuple using
 * the per-field shifts derived from the TP configuration.  A negative
 * shift means that field is not part of the configured tuple.
 */
static u64 hash_filter_ntuple(const struct filter_entry *f)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;
	u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */

	if (tp->port_shift >= 0 && f->fs.mask.iport)
		ntuple |= (u64)f->fs.val.iport << tp->port_shift;

	/* A protocol is always encoded; default to TCP when the spec does
	 * not request one explicitly.
	 */
	if (tp->protocol_shift >= 0) {
		if (!f->fs.val.proto)
			ntuple |= (u64)tcp_proto << tp->protocol_shift;
		else
			ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
	}

	if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
		ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
	if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
		ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
	if (tp->vlan_shift >= 0 && f->fs.mask.ivlan)
		ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
			  tp->vlan_shift;
	/* The VNIC_ID tuple slot carries either PF/VF or outer-VLAN match
	 * data, selected by the F_VNIC bit of the ingress config.
	 */
	if (tp->vnic_shift >= 0) {
		if ((adap->params.tp.ingress_config & F_VNIC) &&
		    f->fs.mask.pfvf_vld)
			ntuple |= (u64)(f->fs.val.pfvf_vld << 16 |
					f->fs.val.pf << 13 | f->fs.val.vf) <<
					tp->vnic_shift;
		else if (!(adap->params.tp.ingress_config & F_VNIC) &&
			 f->fs.mask.ovlan_vld)
			ntuple |= (u64)(f->fs.val.ovlan_vld << 16 |
					f->fs.val.ovlan) << tp->vnic_shift;
	}
	if (tp->tos_shift >= 0 && f->fs.mask.tos)
		ntuple |= (u64)f->fs.val.tos << tp->tos_shift;

	return ntuple;
}
349
/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	/* ULP_TX_PKT header plus an immediate-data sub-command wrapping
	 * the abort request CPL.
	 */
	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_req) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	abort_req->rsvd0 = cpu_to_be32(0);
	abort_req->rsvd1 = 0;
	/* NO_RST: tear the TID down without emitting a TCP reset. */
	abort_req->cmd = CPL_ABORT_NO_RST;
	/* No-op sub-command pads the request to a 16-byte boundary. */
	sc = (struct ulptx_idata *)(abort_req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
373
/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	/* ULP_TX_PKT header plus an immediate-data sub-command wrapping
	 * the abort reply CPL.
	 */
	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_rpl) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = cpu_to_be32(0);
	abort_rpl->rsvd1 = 0;
	/* NO_RST mirrors the abort request: no TCP reset on teardown. */
	abort_rpl->cmd = CPL_ABORT_NO_RST;
	/* No-op sub-command pads the reply to a 16-byte boundary. */
	sc = (struct ulptx_idata *)(abort_rpl + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
397
398 /**
399  * Delete the specified hash filter.
400  */
401 static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
402                                  unsigned int filter_id,
403                                  struct filter_ctx *ctx)
404 {
405         struct adapter *adapter = ethdev2adap(dev);
406         struct tid_info *t = &adapter->tids;
407         struct filter_entry *f;
408         struct sge_ctrl_txq *ctrlq;
409         unsigned int port_id = ethdev2pinfo(dev)->port_id;
410         int ret;
411
412         if (filter_id > adapter->tids.ntids)
413                 return -E2BIG;
414
415         f = lookup_tid(t, filter_id);
416         if (!f) {
417                 dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
418                         __func__, filter_id);
419                 return -EINVAL;
420         }
421
422         ret = writable_filter(f);
423         if (ret)
424                 return ret;
425
426         if (f->valid) {
427                 unsigned int wrlen;
428                 struct rte_mbuf *mbuf;
429                 struct work_request_hdr *wr;
430                 struct ulptx_idata *aligner;
431                 struct cpl_set_tcb_field *req;
432                 struct cpl_abort_req *abort_req;
433                 struct cpl_abort_rpl *abort_rpl;
434
435                 f->ctx = ctx;
436                 f->pending = 1;
437
438                 wrlen = cxgbe_roundup(sizeof(*wr) +
439                                       (sizeof(*req) + sizeof(*aligner)) +
440                                       sizeof(*abort_req) + sizeof(*abort_rpl),
441                                       16);
442
443                 ctrlq = &adapter->sge.ctrlq[port_id];
444                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
445                 if (!mbuf) {
446                         dev_err(adapter, "%s: could not allocate skb ..\n",
447                                 __func__);
448                         goto out_err;
449                 }
450
451                 mbuf->data_len = wrlen;
452                 mbuf->pkt_len = mbuf->data_len;
453
454                 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
455                 INIT_ULPTX_WR(req, wrlen, 0, 0);
456                 wr = (struct work_request_hdr *)req;
457                 wr++;
458                 req = (struct cpl_set_tcb_field *)wr;
459                 mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
460                                 V_TCB_RSS_INFO(M_TCB_RSS_INFO),
461                                 V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
462                                 0, 1);
463                 aligner = (struct ulptx_idata *)(req + 1);
464                 abort_req = (struct cpl_abort_req *)(aligner + 1);
465                 mk_abort_req_ulp(abort_req, f->tid);
466                 abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
467                 mk_abort_rpl_ulp(abort_rpl, f->tid);
468                 t4_mgmt_tx(ctrlq, mbuf);
469         }
470         return 0;
471
472 out_err:
473         return -ENOMEM;
474 }
475
/**
 * Build a ACT_OPEN_REQ6 message for setting IPv6 hash filter.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *req = NULL;
	u64 local_lo, local_hi, peer_lo, peer_hi;
	/* NOTE(review): lip/fip byte arrays are reinterpreted as u32[] —
	 * assumes 4-byte alignment of the spec fields; confirm in
	 * ch_filter_specification's layout.
	 */
	u32 *lip = (u32 *)f->fs.val.lip;
	u32 *fip = (u32 *)f->fs.val.fip;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

		INIT_TP_WR(req, 0);
		break;
	default:
		/* Only the T6 request format is handled here. */
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

	/* Split the 128-bit local/peer addresses into 64-bit halves. */
	local_hi = ((u64)lip[1]) << 32 | lip[0];
	local_lo = ((u64)lip[3]) << 32 | lip[2];
	peer_hi = ((u64)fip[1]) << 32 | fip[0];
	peer_lo = ((u64)fip[3]) << 32 | fip[2];

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = local_hi;
	req->local_ip_lo = local_lo;
	req->peer_ip_hi = peer_hi;
	req->peer_ip_lo = peer_lo;
	/* NOTE(review): with F_TCAM_BYPASS set, several TCP-oriented opt0/
	 * opt2 fields (NAGLE, DELACK, SACK_EN, CONG_CNTRL, CCTRL_ECN)
	 * appear to carry filter flags instead — confirm against the
	 * hardware CPL documentation.
	 */
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
			    V_RSS_QUEUE(f->fs.iq) |
			    F_T5_OPT_2_VALID |
			    F_RX_CHANNEL |
			    V_SACK_EN(f->fs.swapmac) |
			    V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					 (f->fs.dirsteer << 1)) |
			    V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
530
531 /**
532  * Build a ACT_OPEN_REQ message for setting IPv4 hash filter.
533  */
534 static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
535                             unsigned int qid_filterid, struct adapter *adap)
536 {
537         struct cpl_t6_act_open_req *req = NULL;
538
539         switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
540         case CHELSIO_T6:
541                 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
542
543                 INIT_TP_WR(req, 0);
544                 break;
545         default:
546                 dev_err(adap, "%s: unsupported chip type!\n", __func__);
547                 return;
548         }
549
550         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
551                                                     qid_filterid));
552         req->local_port = cpu_to_be16(f->fs.val.lport);
553         req->peer_port = cpu_to_be16(f->fs.val.fport);
554         req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
555                         f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
556         req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
557                         f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
558         req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
559                                         f->fs.newvlan == VLAN_REWRITE) |
560                                 V_DELACK(f->fs.hitcnts) |
561                                 V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
562                                 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
563                                            << 1) |
564                                 V_TX_CHAN(f->fs.eport) |
565                                 V_ULP_MODE(ULP_MODE_NONE) |
566                                 F_TCAM_BYPASS | F_NON_OFFLOAD);
567         req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
568         req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
569                             V_RSS_QUEUE(f->fs.iq) |
570                             F_T5_OPT_2_VALID |
571                             F_RX_CHANNEL |
572                             V_SACK_EN(f->fs.swapmac) |
573                             V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
574                                          (f->fs.dirsteer << 1)) |
575                             V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
576 }
577
578 /**
579  * Set the specified hash filter.
580  */
581 static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
582                                  struct ch_filter_specification *fs,
583                                  struct filter_ctx *ctx)
584 {
585         struct port_info *pi = ethdev2pinfo(dev);
586         struct adapter *adapter = pi->adapter;
587         struct tid_info *t = &adapter->tids;
588         struct filter_entry *f;
589         struct rte_mbuf *mbuf;
590         struct sge_ctrl_txq *ctrlq;
591         unsigned int iq;
592         int atid, size;
593         int ret = 0;
594
595         ret = cxgbe_validate_filter(adapter, fs);
596         if (ret)
597                 return ret;
598
599         iq = get_filter_steerq(dev, fs);
600
601         ctrlq = &adapter->sge.ctrlq[pi->port_id];
602
603         f = t4_os_alloc(sizeof(*f));
604         if (!f)
605                 return -ENOMEM;
606
607         f->fs = *fs;
608         f->ctx = ctx;
609         f->dev = dev;
610         f->fs.iq = iq;
611
612         /*
613          * If the new filter requires loopback Destination MAC and/or VLAN
614          * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
615          * the filter.
616          */
617         if (f->fs.newdmac || f->fs.newvlan == VLAN_INSERT ||
618             f->fs.newvlan == VLAN_REWRITE) {
619                 /* allocate L2T entry for new filter */
620                 f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
621                                                    f->fs.eport, f->fs.dmac);
622                 if (!f->l2t) {
623                         ret = -ENOMEM;
624                         goto out_err;
625                 }
626         }
627
628         /* If the new filter requires Source MAC rewriting then we need to
629          * allocate a SMT entry for the filter
630          */
631         if (f->fs.newsmac) {
632                 f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
633                 if (!f->smt) {
634                         ret = -EAGAIN;
635                         goto out_err;
636                 }
637         }
638
639         atid = cxgbe_alloc_atid(t, f);
640         if (atid < 0)
641                 goto out_err;
642
643         if (f->fs.type == FILTER_TYPE_IPV6) {
644                 /* IPv6 hash filter */
645                 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
646                 if (!f->clipt)
647                         goto free_atid;
648
649                 size = sizeof(struct cpl_t6_act_open_req6);
650                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
651                 if (!mbuf) {
652                         ret = -ENOMEM;
653                         goto free_atid;
654                 }
655
656                 mbuf->data_len = size;
657                 mbuf->pkt_len = mbuf->data_len;
658
659                 mk_act_open_req6(f, mbuf,
660                                  ((adapter->sge.fw_evtq.abs_id << 14) | atid),
661                                  adapter);
662         } else {
663                 /* IPv4 hash filter */
664                 size = sizeof(struct cpl_t6_act_open_req);
665                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
666                 if (!mbuf) {
667                         ret = -ENOMEM;
668                         goto free_atid;
669                 }
670
671                 mbuf->data_len = size;
672                 mbuf->pkt_len = mbuf->data_len;
673
674                 mk_act_open_req(f, mbuf,
675                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
676                                 adapter);
677         }
678
679         f->pending = 1;
680         t4_mgmt_tx(ctrlq, mbuf);
681         return 0;
682
683 free_atid:
684         cxgbe_free_atid(t, atid);
685
686 out_err:
687         clear_filter(f);
688         t4_os_free(f);
689         return ret;
690 }
691
692 /**
693  * t4_mk_filtdelwr - create a delete filter WR
694  * @adap: adapter context
695  * @ftid: the filter ID
696  * @wr: the filter work request to populate
697  * @qid: ingress queue to receive the delete notification
698  *
699  * Creates a filter work request to delete the supplied filter.  If @qid is
700  * negative the delete notification is suppressed.
701  */
702 static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
703                             struct fw_filter2_wr *wr, int qid)
704 {
705         memset(wr, 0, sizeof(*wr));
706         if (adap->params.filter2_wr_support)
707                 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
708         else
709                 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
710         wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
711         wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
712                                     V_FW_FILTER_WR_NOREPLY(qid < 0));
713         wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
714         if (qid >= 0)
715                 wr->rx_chan_rx_rpl_iq =
716                                 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
717 }
718
719 /**
720  * Create FW work request to delete the filter at a specified index
721  */
722 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
723 {
724         struct adapter *adapter = ethdev2adap(dev);
725         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
726         struct rte_mbuf *mbuf;
727         struct fw_filter2_wr *fwr;
728         struct sge_ctrl_txq *ctrlq;
729         unsigned int port_id = ethdev2pinfo(dev)->port_id;
730
731         ctrlq = &adapter->sge.ctrlq[port_id];
732         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
733         if (!mbuf)
734                 return -ENOMEM;
735
736         mbuf->data_len = sizeof(*fwr);
737         mbuf->pkt_len = mbuf->data_len;
738
739         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
740         t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);
741
742         /*
743          * Mark the filter as "pending" and ship off the Filter Work Request.
744          * When we get the Work Request Reply we'll clear the pending status.
745          */
746         f->pending = 1;
747         t4_mgmt_tx(ctrlq, mbuf);
748         return 0;
749 }
750
751 static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
752 {
753         struct adapter *adapter = ethdev2adap(dev);
754         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
755         struct rte_mbuf *mbuf;
756         struct fw_filter2_wr *fwr;
757         struct sge_ctrl_txq *ctrlq;
758         unsigned int port_id = ethdev2pinfo(dev)->port_id;
759         int ret;
760
761         /* If the new filter requires Source MAC rewriting then we need to
762          * allocate a SMT entry for the filter
763          */
764         if (f->fs.newsmac) {
765                 f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
766                 if (!f->smt) {
767                         if (f->l2t) {
768                                 cxgbe_l2t_release(f->l2t);
769                                 f->l2t = NULL;
770                         }
771                         return -ENOMEM;
772                 }
773         }
774
775         ctrlq = &adapter->sge.ctrlq[port_id];
776         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
777         if (!mbuf) {
778                 ret = -ENOMEM;
779                 goto out;
780         }
781
782         mbuf->data_len = sizeof(*fwr);
783         mbuf->pkt_len = mbuf->data_len;
784
785         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
786         memset(fwr, 0, sizeof(*fwr));
787
788         /*
789          * Construct the work request to set the filter.
790          */
791         if (adapter->params.filter2_wr_support)
792                 fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
793         else
794                 fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
795         fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
796         fwr->tid_to_iq =
797                 cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
798                             V_FW_FILTER_WR_RQTYPE(f->fs.type) |
799                             V_FW_FILTER_WR_NOREPLY(0) |
800                             V_FW_FILTER_WR_IQ(f->fs.iq));
801         fwr->del_filter_to_l2tix =
802                 cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
803                             V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
804                             V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
805                             V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
806                             V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
807                             V_FW_FILTER_WR_INSVLAN
808                                 (f->fs.newvlan == VLAN_INSERT ||
809                                  f->fs.newvlan == VLAN_REWRITE) |
810                             V_FW_FILTER_WR_RMVLAN
811                                 (f->fs.newvlan == VLAN_REMOVE ||
812                                  f->fs.newvlan == VLAN_REWRITE) |
813                             V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
814                             V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
815                             V_FW_FILTER_WR_PRIO(f->fs.prio) |
816                             V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
817         fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
818         fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
819         fwr->frag_to_ovlan_vldm =
820                 (V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
821                  V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
822                  V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
823                  V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
824         fwr->smac_sel = f->smt ? f->smt->hw_idx : 0;
825         fwr->rx_chan_rx_rpl_iq =
826                 cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
827                             V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
828                                                      ));
829         fwr->maci_to_matchtypem =
830                 cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
831                             V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
832                             V_FW_FILTER_WR_PORT(f->fs.val.iport) |
833                             V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
834         fwr->ptcl = f->fs.val.proto;
835         fwr->ptclm = f->fs.mask.proto;
836         fwr->ttyp = f->fs.val.tos;
837         fwr->ttypm = f->fs.mask.tos;
838         fwr->ivlan = cpu_to_be16(f->fs.val.ivlan);
839         fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan);
840         fwr->ovlan = cpu_to_be16(f->fs.val.ovlan);
841         fwr->ovlanm = cpu_to_be16(f->fs.mask.ovlan);
842         rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
843         rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
844         rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
845         rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
846         fwr->lp = cpu_to_be16(f->fs.val.lport);
847         fwr->lpm = cpu_to_be16(f->fs.mask.lport);
848         fwr->fp = cpu_to_be16(f->fs.val.fport);
849         fwr->fpm = cpu_to_be16(f->fs.mask.fport);
850
851         if (adapter->params.filter2_wr_support) {
852                 fwr->filter_type_swapmac =
853                          V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
854                 fwr->natmode_to_ulp_type =
855                         V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
856                                                  ULP_MODE_TCPDDP :
857                                                  ULP_MODE_NONE) |
858                         V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
859                 memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
860                 memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
861                 fwr->newlport = cpu_to_be16(f->fs.nat_lport);
862                 fwr->newfport = cpu_to_be16(f->fs.nat_fport);
863         }
864
865         /*
866          * Mark the filter as "pending" and ship off the Filter Work Request.
867          * When we get the Work Request Reply we'll clear the pending status.
868          */
869         f->pending = 1;
870         t4_mgmt_tx(ctrlq, mbuf);
871         return 0;
872
873 out:
874         return ret;
875 }
876
877 /**
878  * Set the corresponding entries in the bitmap.
879  */
880 static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
881 {
882         u32 i;
883
884         t4_os_lock(&t->ftid_lock);
885         if (rte_bitmap_get(t->ftid_bmap, fidx)) {
886                 t4_os_unlock(&t->ftid_lock);
887                 return -EBUSY;
888         }
889
890         for (i = fidx; i < fidx + nentries; i++)
891                 rte_bitmap_set(t->ftid_bmap, i);
892         t4_os_unlock(&t->ftid_lock);
893         return 0;
894 }
895
896 /**
897  * Clear the corresponding entries in the bitmap.
898  */
899 static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
900 {
901         u32 i;
902
903         t4_os_lock(&t->ftid_lock);
904         for (i = fidx; i < fidx + nentries; i++)
905                 rte_bitmap_clear(t->ftid_bmap, i);
906         t4_os_unlock(&t->ftid_lock);
907 }
908
909 /**
910  * Check a delete filter request for validity and send it to the hardware.
911  * Return 0 on success, an error number otherwise.  We attach any provided
912  * filter operation context to the internal filter specification in order to
913  * facilitate signaling completion of the operation.
914  */
915 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
916                      struct ch_filter_specification *fs,
917                      struct filter_ctx *ctx)
918 {
919         struct port_info *pi = dev->data->dev_private;
920         struct adapter *adapter = pi->adapter;
921         struct filter_entry *f;
922         unsigned int chip_ver;
923         u8 nentries;
924         int ret;
925
926         if (is_hashfilter(adapter) && fs->cap)
927                 return cxgbe_del_hash_filter(dev, filter_id, ctx);
928
929         if (filter_id >= adapter->tids.nftids)
930                 return -ERANGE;
931
932         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
933
934         /*
935          * Ensure IPv6 filter id is aligned on the 2 slot boundary for T6,
936          * and 4 slot boundary for cards below T6.
937          */
938         if (fs->type == FILTER_TYPE_IPV6) {
939                 if (chip_ver < CHELSIO_T6)
940                         filter_id &= ~(0x3);
941                 else
942                         filter_id &= ~(0x1);
943         }
944
945         nentries = cxgbe_filter_slots(adapter, fs->type);
946         ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
947         if (!ret) {
948                 dev_warn(adap, "%s: could not find filter entry: %u\n",
949                          __func__, filter_id);
950                 return -EINVAL;
951         }
952
953         f = &adapter->tids.ftid_tab[filter_id];
954         ret = writable_filter(f);
955         if (ret)
956                 return ret;
957
958         if (f->valid) {
959                 f->ctx = ctx;
960                 cxgbe_clear_ftid(&adapter->tids,
961                                  f->tid - adapter->tids.ftid_base,
962                                  nentries);
963                 return del_filter_wr(dev, filter_id);
964         }
965
966         /*
967          * If the caller has passed in a Completion Context then we need to
968          * mark it as a successful completion so they don't stall waiting
969          * for it.
970          */
971         if (ctx) {
972                 ctx->result = 0;
973                 t4_complete(&ctx->completion);
974         }
975
976         return 0;
977 }
978
/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 *
 * @dev: the ethdev the filter is created on
 * @filter_id: LE-TCAM index (relative to ftid_base) requested by the caller
 * @fs: the requested filter specification (copied into the entry)
 * @ctx: optional completion context, completed by cxgbe_filter_rpl()
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	u8 nentries, bitoff[16] = {0};
	struct filter_entry *f;
	unsigned int chip_ver;
	unsigned int fidx, iq;
	u32 iconf;
	int ret;

	/* Hash (HASH region) filters take a completely different path. */
	if (is_hashfilter(adapter) && fs->cap)
		return cxgbe_set_hash_filter(dev, fs, ctx);

	if (filter_id >= adapter->tids.nftids)
		return -ERANGE;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

	/* Reject specs that use match fields not enabled in hardware. */
	ret = cxgbe_validate_filter(adapter, fs);
	if (ret)
		return ret;

	/*
	 * IPv6 filters occupy four slots and must be aligned on four-slot
	 * boundaries for T5. On T6, IPv6 filters occupy two-slots and
	 * must be aligned on two-slot boundaries.
	 *
	 * IPv4 filters only occupy a single slot and have no alignment
	 * requirements.
	 */
	fidx = filter_id;
	if (fs->type == FILTER_TYPE_IPV6) {
		if (chip_ver < CHELSIO_T6)
			fidx &= ~(0x3);
		else
			fidx &= ~(0x1);
	}

	/* Unlike the delete path, a misaligned index is an error here. */
	if (fidx != filter_id)
		return -EINVAL;

	nentries = cxgbe_filter_slots(adapter, fs->type);
	ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
	if (ret)
		return -EBUSY;

	iq = get_filter_steerq(dev, fs);

	/*
	 * Check to make sure that provided filter index is not
	 * already in use by someone else
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	/* Hardware tid is absolute; the bitmap index stays relative. */
	fidx = adapter->tids.ftid_base + filter_id;
	ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
	if (ret)
		return ret;

	/*
	 * Check to make sure the filter requested is writable ...
	 */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
		return ret;
	}

	/*
	 * Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	/* Allocate a clip table entry only if we have non-zero IPv6 address. */
	if (chip_ver > CHELSIO_T5 && f->fs.type &&
	    memcmp(f->fs.val.lip, bitoff, sizeof(bitoff))) {
		f->clipt = cxgbe_clip_alloc(dev, (u32 *)&f->fs.val.lip);
		if (!f->clipt) {
			ret = -ENOMEM;
			goto free_tid;
		}
	}

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newvlan || f->fs.newdmac) {
		f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
						   f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto free_tid;
		}
	}

	iconf = adapter->params.tp.ingress_config;

	/* Either PFVF or OVLAN can be active, but not both
	 * So, if PFVF is enabled, then overwrite the OVLAN
	 * fields with PFVF fields before writing the spec
	 * to hardware.
	 */
	if (iconf & F_VNIC) {
		/* NOTE(review): PF appears packed into bits 15:13 of the
		 * OVLAN field here -- confirm against the TP ingress config
		 * layout if changing.
		 */
		f->fs.val.ovlan = fs->val.pf << 13 | fs->val.vf;
		f->fs.mask.ovlan = fs->mask.pf << 13 | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	}

	/*
	 * Attempt to set the filter.  If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(dev, filter_id);
	if (ret)
		goto free_tid;

	return ret;

free_tid:
	/* Undo the bitmap reservation and release clip/l2t references. */
	cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
	clear_filter(f);
	return ret;
}
1124
/**
 * Handle a Hash filter write reply.
 *
 * Called on a CPL_ACT_OPEN_RPL from firmware.  On success the entry is
 * moved from the atid table to the tid table and the TCB fields backing
 * hit counters and MAC/VLAN rewrite are programmed; on failure the entry
 * is torn down and freed.  Any attached completion context is signalled
 * either way.
 */
void cxgbe_hash_filter_rpl(struct adapter *adap,
			   const struct cpl_act_open_rpl *rpl)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;
	struct filter_ctx *ctx = NULL;
	unsigned int tid = GET_TID(rpl);
	/* The atid we allocated at submit time rides in the reply. */
	unsigned int ftid = G_TID_TID(G_AOPEN_ATID
				      (be32_to_cpu(rpl->atid_status)));
	unsigned int status  = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

	f = lookup_atid(t, ftid);
	if (!f) {
		dev_warn(adap, "%s: could not find filter entry: %d\n",
			 __func__, ftid);
		return;
	}

	/* Detach the completion context before touching the entry. */
	ctx = f->ctx;
	f->ctx = NULL;

	switch (status) {
	case CPL_ERR_NONE: {
		f->tid = tid;
		f->pending = 0;  /* asynchronous setup completed */
		f->valid = 1;

		/* Entry now lives in the tid table; the atid is free. */
		cxgbe_insert_tid(t, f, f->tid, 0);
		cxgbe_free_atid(t, ftid);
		if (ctx) {
			ctx->tid = f->tid;
			ctx->result = 0;
		}
		/* Zero the TCB timestamp/age words used as hit counters. */
		if (f->fs.hitcnts)
			set_tcb_field(adap, tid,
				      W_TCB_TIMESTAMP,
				      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
				      V_TCB_T_RTT_TS_RECENT_AGE
					      (M_TCB_T_RTT_TS_RECENT_AGE),
				      V_TCB_TIMESTAMP(0ULL) |
				      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
				      1);
		/* TCB control flags repurposed to enable rewrites --
		 * presumably ECE/RFR/CWR map to DMAC/VLAN/SMAC rewrite in
		 * filter mode; see the hardware TCB documentation.
		 */
		if (f->fs.newdmac)
			set_tcb_tflag(adap, tid, S_TF_CCTRL_ECE, 1, 1);
		if (f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE)
			set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
		if (f->fs.newsmac) {
			/* f->smt was allocated before the WR was sent. */
			set_tcb_tflag(adap, tid, S_TF_CCTRL_CWR, 1, 1);
			set_tcb_field(adap, tid, W_TCB_SMAC_SEL,
				      V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
				      V_TCB_SMAC_SEL(f->smt->hw_idx), 1);
		}
		break;
	}
	default:
		dev_warn(adap, "%s: filter creation failed with status = %u\n",
			 __func__, status);

		if (ctx) {
			/* TCAM full is retryable; everything else is not. */
			if (status == CPL_ERR_TCAM_FULL)
				ctx->result = -EAGAIN;
			else
				ctx->result = -EINVAL;
		}

		/* Failed insert: release the atid and free the entry. */
		cxgbe_free_atid(t, ftid);
		clear_filter(f);
		t4_os_free(f);
	}

	if (ctx)
		t4_complete(&ctx->completion);
}
1202
1203 /**
1204  * Handle a LE-TCAM filter write/deletion reply.
1205  */
1206 void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1207 {
1208         struct filter_entry *f = NULL;
1209         unsigned int tid = GET_TID(rpl);
1210         int idx, max_fidx = adap->tids.nftids;
1211
1212         /* Get the corresponding filter entry for this tid */
1213         if (adap->tids.ftid_tab) {
1214                 /* Check this in normal filter region */
1215                 idx = tid - adap->tids.ftid_base;
1216                 if (idx >= max_fidx)
1217                         return;
1218
1219                 f = &adap->tids.ftid_tab[idx];
1220                 if (f->tid != tid)
1221                         return;
1222         }
1223
1224         /* We found the filter entry for this tid */
1225         if (f) {
1226                 unsigned int ret = G_COOKIE(rpl->cookie);
1227                 struct filter_ctx *ctx;
1228
1229                 /*
1230                  * Pull off any filter operation context attached to the
1231                  * filter.
1232                  */
1233                 ctx = f->ctx;
1234                 f->ctx = NULL;
1235
1236                 if (ret == FW_FILTER_WR_FLT_ADDED) {
1237                         f->pending = 0;  /* asynchronous setup completed */
1238                         f->valid = 1;
1239                         if (ctx) {
1240                                 ctx->tid = f->tid;
1241                                 ctx->result = 0;
1242                         }
1243                 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
1244                         /*
1245                          * Clear the filter when we get confirmation from the
1246                          * hardware that the filter has been deleted.
1247                          */
1248                         clear_filter(f);
1249                         if (ctx)
1250                                 ctx->result = 0;
1251                 } else {
1252                         /*
1253                          * Something went wrong.  Issue a warning about the
1254                          * problem and clear everything out.
1255                          */
1256                         dev_warn(adap, "filter %u setup failed with error %u\n",
1257                                  idx, ret);
1258                         clear_filter(f);
1259                         if (ctx)
1260                                 ctx->result = -EINVAL;
1261                 }
1262
1263                 if (ctx)
1264                         t4_complete(&ctx->completion);
1265         }
1266 }
1267
1268 /*
1269  * Retrieve the packet count for the specified filter.
1270  */
1271 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
1272                            u64 *c, int hash, bool get_byte)
1273 {
1274         struct filter_entry *f;
1275         unsigned int tcb_base, tcbaddr;
1276         int ret;
1277
1278         tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
1279         if (is_hashfilter(adapter) && hash) {
1280                 if (fidx < adapter->tids.ntids) {
1281                         f = adapter->tids.tid_tab[fidx];
1282                         if (!f)
1283                                 return -EINVAL;
1284
1285                         if (is_t5(adapter->params.chip)) {
1286                                 *c = 0;
1287                                 return 0;
1288                         }
1289                         tcbaddr = tcb_base + (fidx * TCB_SIZE);
1290                         goto get_count;
1291                 } else {
1292                         return -ERANGE;
1293                 }
1294         } else {
1295                 if (fidx >= adapter->tids.nftids)
1296                         return -ERANGE;
1297
1298                 f = &adapter->tids.ftid_tab[fidx];
1299                 if (!f->valid)
1300                         return -EINVAL;
1301
1302                 tcbaddr = tcb_base + f->tid * TCB_SIZE;
1303         }
1304
1305         f = &adapter->tids.ftid_tab[fidx];
1306         if (!f->valid)
1307                 return -EINVAL;
1308
1309 get_count:
1310         if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
1311                 /*
1312                  * For T5, the Filter Packet Hit Count is maintained as a
1313                  * 32-bit Big Endian value in the TCB field {timestamp}.
1314                  * Similar to the craziness above, instead of the filter hit
1315                  * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
1316                  * sizeof(u32)), it actually shows up at offset 24.  Whacky.
1317                  */
1318                 if (get_byte) {
1319                         unsigned int word_offset = 4;
1320                         __be64 be64_byte_count;
1321
1322                         t4_os_lock(&adapter->win0_lock);
1323                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1324                                            tcbaddr +
1325                                            (word_offset * sizeof(__be32)),
1326                                            sizeof(be64_byte_count),
1327                                            &be64_byte_count,
1328                                            T4_MEMORY_READ);
1329                         t4_os_unlock(&adapter->win0_lock);
1330                         if (ret < 0)
1331                                 return ret;
1332                         *c = be64_to_cpu(be64_byte_count);
1333                 } else {
1334                         unsigned int word_offset = 6;
1335                         __be32 be32_count;
1336
1337                         t4_os_lock(&adapter->win0_lock);
1338                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1339                                            tcbaddr +
1340                                            (word_offset * sizeof(__be32)),
1341                                            sizeof(be32_count), &be32_count,
1342                                            T4_MEMORY_READ);
1343                         t4_os_unlock(&adapter->win0_lock);
1344                         if (ret < 0)
1345                                 return ret;
1346                         *c = (u64)be32_to_cpu(be32_count);
1347                 }
1348         }
1349         return 0;
1350 }
1351
1352 /*
1353  * Clear the packet count for the specified filter.
1354  */
1355 int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
1356                              int hash, bool clear_byte)
1357 {
1358         u64 tcb_mask = 0, tcb_val = 0;
1359         struct filter_entry *f = NULL;
1360         u16 tcb_word = 0;
1361
1362         if (is_hashfilter(adapter) && hash) {
1363                 if (fidx >= adapter->tids.ntids)
1364                         return -ERANGE;
1365
1366                 /* No hitcounts supported for T5 hashfilters */
1367                 if (is_t5(adapter->params.chip))
1368                         return 0;
1369
1370                 f = adapter->tids.tid_tab[fidx];
1371         } else {
1372                 if (fidx >= adapter->tids.nftids)
1373                         return -ERANGE;
1374
1375                 f = &adapter->tids.ftid_tab[fidx];
1376         }
1377
1378         if (!f || !f->valid)
1379                 return -EINVAL;
1380
1381         tcb_word = W_TCB_TIMESTAMP;
1382         tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
1383         tcb_val = V_TCB_TIMESTAMP(0ULL);
1384
1385         set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
1386
1387         if (clear_byte) {
1388                 tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
1389                 tcb_mask =
1390                         V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
1391                         V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
1392                 tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
1393                           V_TCB_T_RTSEQ_RECENT(0ULL);
1394
1395                 set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
1396         }
1397
1398         return 0;
1399 }
1400
1401 /**
1402  * Handle a Hash filter delete reply.
1403  */
1404 void cxgbe_hash_del_filter_rpl(struct adapter *adap,
1405                                const struct cpl_abort_rpl_rss *rpl)
1406 {
1407         struct tid_info *t = &adap->tids;
1408         struct filter_entry *f;
1409         struct filter_ctx *ctx = NULL;
1410         unsigned int tid = GET_TID(rpl);
1411
1412         f = lookup_tid(t, tid);
1413         if (!f) {
1414                 dev_warn(adap, "%s: could not find filter entry: %u\n",
1415                          __func__, tid);
1416                 return;
1417         }
1418
1419         ctx = f->ctx;
1420
1421         clear_filter(f);
1422         cxgbe_remove_tid(t, 0, tid, 0);
1423         t4_os_free(f);
1424
1425         if (ctx) {
1426                 ctx->result = 0;
1427                 t4_complete(&ctx->completion);
1428         }
1429 }