net/cxgbe: fix SMT leak in filter error and free path
drivers/net/cxgbe/cxgbe_filter.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include <rte_net.h>

#include "base/common.h"
#include "base/t4_tcb.h"
#include "base/t4_regs.h"
#include "cxgbe_filter.h"
#include "mps_tcam.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"

/**
 * Initialize Hash Filters
 */
int cxgbe_init_hash_filter(struct adapter *adap)
{
        unsigned int n_user_filters;
        unsigned int user_filter_perc;
        int ret;
        u32 params[7], val[7];

#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
        V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
        V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
        V_FW_PARAMS_PARAM_Y(0) | \
        V_FW_PARAMS_PARAM_Z(0))

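        /* Query the firmware for the number of TIDs backing the hash
         * filter region; half of them (capped at MAX_ATIDS) are set
         * aside as active TIDs for tracking pending filter requests.
         */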
        params[0] = FW_PARAM_DEV(NTID);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
                              params, val);
        if (ret < 0)
                return ret;
        adap->tids.ntids = val[0];
        adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

        user_filter_perc = 100;
        n_user_filters = mult_frac(adap->tids.nftids,
                                   user_filter_perc,
                                   100);

        adap->tids.nftids = n_user_filters;
        adap->params.hash_filter = 1;
        return 0;
}

/**
 * Validate if the requested filter specification can be set by checking
 * if the requested features have been enabled
 */
int cxgbe_validate_filter(struct adapter *adapter,
                          struct ch_filter_specification *fs)
{
        u32 fconf, iconf;

        /*
         * Check for unconfigured fields being used.
         */
        fconf = fs->cap ? adapter->params.tp.filter_mask :
                          adapter->params.tp.vlan_pri_map;

        iconf = adapter->params.tp.ingress_config;

#define S(_field) \
        (fs->val._field || fs->mask._field)
#define U(_mask, _field) \
        (!(fconf & (_mask)) && S(_field))

        if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
            U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
            U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) ||
            U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld))
                return -EOPNOTSUPP;

        /* Either OVLAN or PFVF match is enabled in hardware, but not both */
        if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
            (S(ovlan_vld) && (iconf & F_VNIC)))
                return -EOPNOTSUPP;

        /* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */
        if ((S(ovlan_vld) && (iconf & F_USE_ENC_IDX)) ||
            (S(pfvf_vld) && (iconf & F_USE_ENC_IDX)))
                return -EOPNOTSUPP;

#undef S
#undef U

        /*
         * If the user is requesting that the filter action loop
         * matching packets back out one of our ports, make sure that
         * the egress port is in range.
         */
        if (fs->action == FILTER_SWITCH &&
            fs->eport >= adapter->params.nports)
                return -ERANGE;

        /*
         * Don't allow various trivially obvious bogus out-of-range
         * values ...
         */
        if (fs->val.iport >= adapter->params.nports)
                return -ERANGE;

        if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
                return -EOPNOTSUPP;

        if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
                return -EOPNOTSUPP;

        return 0;
}

/**
 * Get the queue to which the traffic must be steered.
 */
static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
                                      struct ch_filter_specification *fs)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        unsigned int iq;

        /*
         * If the user has requested steering matching Ingress Packets
         * to a specific Queue Set, we need to make sure it's in range
         * for the port and map that into the Absolute Queue ID of the
         * Queue Set's Response Queue.
         */
        if (!fs->dirsteer) {
                iq = 0;
        } else {
                /*
                 * If the iq id is greater than the number of qsets,
                 * then assume it is an absolute qid.
                 */
                if (fs->iq < pi->n_rx_qsets)
                        iq = adapter->sge.ethrxq[pi->first_qset +
                                                 fs->iq].rspq.abs_id;
                else
                        iq = fs->iq;
        }

        return iq;
}

/* Return an error number if the indicated filter isn't writable ... */
static int writable_filter(struct filter_entry *f)
{
        if (f->locked)
                return -EPERM;
        if (f->pending)
                return -EBUSY;

        return 0;
}

/**
 * Send CPL_SET_TCB_FIELD message
 */
static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
                          u16 word, u64 mask, u64 val, int no_reply)
{
        struct rte_mbuf *mbuf;
        struct cpl_set_tcb_field *req;
        struct sge_ctrl_txq *ctrlq;

        ctrlq = &adapter->sge.ctrlq[0];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        WARN_ON(!mbuf);

        mbuf->data_len = sizeof(*req);
        mbuf->pkt_len = mbuf->data_len;

        req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
        memset(req, 0, sizeof(*req));
        INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
        req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
                                      V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
                                      V_NO_REPLY(no_reply));
        req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);

        t4_mgmt_tx(ctrlq, mbuf);
}

/**
 * Set one of the t_flags bits in the TCB.
 */
static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
                          unsigned int bit_pos, unsigned int val, int no_reply)
{
        set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
                      (unsigned long long)val << bit_pos, no_reply);
}

/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
                                        struct cpl_set_tcb_field *req,
                                        unsigned int word,
                                        u64 mask, u64 val, u8 cookie,
                                        int no_reply)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
        req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
                                      V_QUEUENO(0));
        req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);
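        /* Terminate the message with a no-op immediate, which (as with the
         * abort messages below) appears to pad the payload out for the
         * ULP_TX engine's 16-byte alignment.
         */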
        sc = (struct ulptx_idata *)(req + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
 * IPv4 requires only 1 slot on all cards.
 */
u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
{
        if (family == FILTER_TYPE_IPV6) {
                if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
                        return 4;

                return 2;
        }

        return 1;
}

/**
 * Check if entries are already filled.
 */
bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
{
        bool result = FALSE;
        u32 i;

        /* Ensure there are enough slots available. */
        t4_os_lock(&t->ftid_lock);
        for (i = fidx; i < fidx + nentries; i++) {
                if (rte_bitmap_get(t->ftid_bmap, i)) {
                        result = TRUE;
                        break;
                }
        }
        t4_os_unlock(&t->ftid_lock);
        return result;
}

/**
 * Allocate an available range of free entries.
 */
int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
{
        struct tid_info *t = &adap->tids;
        int pos;
        int size = t->nftids;

        t4_os_lock(&t->ftid_lock);
        if (nentries > 1)
                pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
                                                    nentries);
        else
                pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
        t4_os_unlock(&t->ftid_lock);

        return pos < size ? pos : -1;
}

/**
 * Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct filter_entry *f)
{
        struct port_info *pi = ethdev2pinfo(f->dev);

        if (f->clipt)
                cxgbe_clip_release(f->dev, f->clipt);

        if (f->l2t)
                cxgbe_l2t_release(f->l2t);

        if (f->fs.mask.macidx)
                cxgbe_mpstcam_remove(pi, f->fs.val.macidx);

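        /* Release the SMT entry held for Source MAC rewriting.
         * clear_filter() runs on both the filter error paths and the
         * free/delete paths, so releasing it here closes the SMT leak
         * this change fixes.
         */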
        if (f->smt)
                cxgbe_smt_release(f->smt);

        /* The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/**
 * Construct hash filter ntuple.
 */
static u64 hash_filter_ntuple(const struct filter_entry *f)
{
        struct adapter *adap = ethdev2adap(f->dev);
        struct tp_params *tp = &adap->params.tp;
        u64 ntuple = 0;
        u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */

        if (tp->port_shift >= 0 && f->fs.mask.iport)
                ntuple |= (u64)f->fs.val.iport << tp->port_shift;

        if (tp->protocol_shift >= 0) {
                if (!f->fs.val.proto)
                        ntuple |= (u64)tcp_proto << tp->protocol_shift;
                else
                        ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
        }

        if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
                ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
        if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
                ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
        if (tp->vlan_shift >= 0 && f->fs.mask.ivlan)
                ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
                          tp->vlan_shift;
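        /* The VNIC field carries either the PF/VF match or the outer VLAN
         * match, depending on whether F_VNIC is set in the ingress config.
         */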
        if (tp->vnic_shift >= 0) {
                if ((adap->params.tp.ingress_config & F_VNIC) &&
                    f->fs.mask.pfvf_vld)
                        ntuple |= (u64)(f->fs.val.pfvf_vld << 16 |
                                        f->fs.val.pf << 13 | f->fs.val.vf) <<
                                        tp->vnic_shift;
                else if (!(adap->params.tp.ingress_config & F_VNIC) &&
                         f->fs.mask.ovlan_vld)
                        ntuple |= (u64)(f->fs.val.ovlan_vld << 16 |
                                        f->fs.val.ovlan) << tp->vnic_shift;
        }
        if (tp->tos_shift >= 0 && f->fs.mask.tos)
                ntuple |= (u64)f->fs.val.tos << tp->tos_shift;

        return ntuple;
}

/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
                             unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*abort_req) -
                              sizeof(struct work_request_hdr));
        OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
        abort_req->rsvd0 = cpu_to_be32(0);
        abort_req->rsvd1 = 0;
        abort_req->cmd = CPL_ABORT_NO_RST;
        sc = (struct ulptx_idata *)(abort_req + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
                             unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*abort_rpl) -
                              sizeof(struct work_request_hdr));
        OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
        abort_rpl->rsvd0 = cpu_to_be32(0);
        abort_rpl->rsvd1 = 0;
        abort_rpl->cmd = CPL_ABORT_NO_RST;
        sc = (struct ulptx_idata *)(abort_rpl + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
                                 unsigned int filter_id,
                                 struct filter_ctx *ctx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;
        int ret;

        if (filter_id > adapter->tids.ntids)
                return -E2BIG;

        f = lookup_tid(t, filter_id);
        if (!f) {
                dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
                        __func__, filter_id);
                return -EINVAL;
        }

        ret = writable_filter(f);
        if (ret)
                return ret;

        if (f->valid) {
                unsigned int wrlen;
                struct rte_mbuf *mbuf;
                struct work_request_hdr *wr;
                struct ulptx_idata *aligner;
                struct cpl_set_tcb_field *req;
                struct cpl_abort_req *abort_req;
                struct cpl_abort_rpl *abort_rpl;

                f->ctx = ctx;
                f->pending = 1;

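                /* Build one composite ULP_TX work request: first point the
                 * TID's RSS info at the firmware event queue so the reply
                 * is steered there, then issue CPL_ABORT_REQ/CPL_ABORT_RPL
                 * to tear the hash filter down.
                 */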
                wrlen = cxgbe_roundup(sizeof(*wr) +
                                      (sizeof(*req) + sizeof(*aligner)) +
                                      sizeof(*abort_req) + sizeof(*abort_rpl),
                                      16);

                ctrlq = &adapter->sge.ctrlq[port_id];
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        dev_err(adapter, "%s: could not allocate mbuf\n",
                                __func__);
                        goto out_err;
                }

                mbuf->data_len = wrlen;
                mbuf->pkt_len = mbuf->data_len;

                req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
                INIT_ULPTX_WR(req, wrlen, 0, 0);
                wr = (struct work_request_hdr *)req;
                wr++;
                req = (struct cpl_set_tcb_field *)wr;
                mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
                                V_TCB_RSS_INFO(M_TCB_RSS_INFO),
                                V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
                                0, 1);
                aligner = (struct ulptx_idata *)(req + 1);
                abort_req = (struct cpl_abort_req *)(aligner + 1);
                mk_abort_req_ulp(abort_req, f->tid);
                abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
                mk_abort_rpl_ulp(abort_rpl, f->tid);
                t4_mgmt_tx(ctrlq, mbuf);
        }
        return 0;

out_err:
        return -ENOMEM;
}

/**
 * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
                             unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req6 *req = NULL;
        u64 local_lo, local_hi, peer_lo, peer_hi;
        u32 *lip = (u32 *)f->fs.val.lip;
        u32 *fip = (u32 *)f->fs.val.fip;

        switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
        case CHELSIO_T6:
                req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

                INIT_TP_WR(req, 0);
                break;
        default:
                dev_err(adap, "%s: unsupported chip type!\n", __func__);
                return;
        }

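        /* The 128-bit IPv6 addresses are carried as two 64-bit halves,
         * taken from the filter tuple in network byte order.
         */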
        local_hi = ((u64)lip[1]) << 32 | lip[0];
        local_lo = ((u64)lip[3]) << 32 | lip[2];
        peer_hi = ((u64)fip[1]) << 32 | fip[0];
        peer_lo = ((u64)fip[3]) << 32 | fip[2];

        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                    qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        req->local_ip_hi = local_hi;
        req->local_ip_lo = local_lo;
        req->peer_ip_hi = peer_hi;
        req->peer_ip_lo = peer_lo;
        req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                V_DELACK(f->fs.hitcnts) |
                                V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
                                V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
        req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
                            V_RSS_QUEUE(f->fs.iq) |
                            F_T5_OPT_2_VALID |
                            F_RX_CHANNEL |
                            V_SACK_EN(f->fs.swapmac) |
                            V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
                                         (f->fs.dirsteer << 1)) |
                            V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
 */
static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
                            unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req *req = NULL;

        switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
        case CHELSIO_T6:
                req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);

                INIT_TP_WR(req, 0);
                break;
        default:
                dev_err(adap, "%s: unsupported chip type!\n", __func__);
                return;
        }

        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                                    qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
                        f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
        req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
                        f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
        req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                V_DELACK(f->fs.hitcnts) |
                                V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
                                V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
        req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
                            V_RSS_QUEUE(f->fs.iq) |
                            F_T5_OPT_2_VALID |
                            F_RX_CHANNEL |
                            V_SACK_EN(f->fs.swapmac) |
                            V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
                                         (f->fs.dirsteer << 1)) |
                            V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Set the specified hash filter.
 */
static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
                                 struct ch_filter_specification *fs,
                                 struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct rte_mbuf *mbuf;
        struct sge_ctrl_txq *ctrlq;
        unsigned int iq;
        int atid, size;
        int ret = 0;

        ret = cxgbe_validate_filter(adapter, fs);
        if (ret)
                return ret;

        iq = get_filter_steerq(dev, fs);

        ctrlq = &adapter->sge.ctrlq[pi->port_id];

        f = t4_os_alloc(sizeof(*f));
        if (!f)
                return -ENOMEM;

        f->fs = *fs;
        f->ctx = ctx;
        f->dev = dev;
        f->fs.iq = iq;

        /* Allocate MPS TCAM entry to match Destination MAC. */
        if (f->fs.mask.macidx) {
                int idx;

                idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
                if (idx <= 0) {
                        ret = -ENOMEM;
                        goto out_err;
                }

                f->fs.val.macidx = idx;
        }

        /*
         * If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan == VLAN_INSERT ||
            f->fs.newvlan == VLAN_REWRITE) {
                /* allocate L2T entry for new filter */
                f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
                                                   f->fs.eport, f->fs.dmac);
                if (!f->l2t) {
                        ret = -ENOMEM;
                        goto out_err;
                }
        }

        /* If the new filter requires Source MAC rewriting then we need to
         * allocate an SMT entry for the filter
         */
        if (f->fs.newsmac) {
                f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
                if (!f->smt) {
                        ret = -EAGAIN;
                        goto out_err;
                }
        }

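        /* Reserve an active TID (ATID) to track the pending ACT_OPEN
         * request; the real TID is assigned by hardware and installed in
         * cxgbe_hash_filter_rpl() when the reply arrives.
         */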
        atid = cxgbe_alloc_atid(t, f);
        if (atid < 0) {
                ret = -ENOMEM;
                goto out_err;
        }

        if (f->fs.type == FILTER_TYPE_IPV6) {
                /* IPv6 hash filter */
                f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
                if (!f->clipt) {
                        ret = -ENOMEM;
                        goto free_atid;
                }

                size = sizeof(struct cpl_t6_act_open_req6);
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        ret = -ENOMEM;
                        goto free_atid;
                }

                mbuf->data_len = size;
                mbuf->pkt_len = mbuf->data_len;

                mk_act_open_req6(f, mbuf,
                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                 adapter);
        } else {
                /* IPv4 hash filter */
                size = sizeof(struct cpl_t6_act_open_req);
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        ret = -ENOMEM;
                        goto free_atid;
                }

                mbuf->data_len = size;
                mbuf->pkt_len = mbuf->data_len;

                mk_act_open_req(f, mbuf,
                                ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                adapter);
        }

        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;

free_atid:
        cxgbe_free_atid(t, atid);

out_err:
        clear_filter(f);
        t4_os_free(f);
        return ret;
}

/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @adap: adapter context
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
                            struct fw_filter2_wr *wr, int qid)
{
        memset(wr, 0, sizeof(*wr));
        if (adap->params.filter2_wr_support)
                wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
        wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
                                    V_FW_FILTER_WR_NOREPLY(qid < 0));
        wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
        if (qid >= 0)
                wr->rx_chan_rx_rpl_iq =
                                cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/**
 * Create FW work request to delete the filter at a specified index
 */
static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter2_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;

        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf)
                return -ENOMEM;

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
        t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;
}

static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter2_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;

        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf)
                return -ENOMEM;

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
        memset(fwr, 0, sizeof(*fwr));

        /*
         * Construct the work request to set the filter.
         */
        if (adapter->params.filter2_wr_support)
                fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
        fwr->tid_to_iq =
                cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
                            V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                            V_FW_FILTER_WR_NOREPLY(0) |
                            V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                            V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                            V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                            V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                            V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                            V_FW_FILTER_WR_INSVLAN
                                (f->fs.newvlan == VLAN_INSERT ||
                                 f->fs.newvlan == VLAN_REWRITE) |
                            V_FW_FILTER_WR_RMVLAN
                                (f->fs.newvlan == VLAN_REMOVE ||
                                 f->fs.newvlan == VLAN_REWRITE) |
                            V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                            V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                            V_FW_FILTER_WR_PRIO(f->fs.prio) |
                            V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
        fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->smac_sel = f->smt ? f->smt->hw_idx : 0;
        fwr->rx_chan_rx_rpl_iq =
                cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
                            V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id));
        fwr->maci_to_matchtypem =
                cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                            V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                            V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                            V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = cpu_to_be16(f->fs.val.ivlan);
        fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan);
        fwr->ovlan = cpu_to_be16(f->fs.val.ovlan);
        fwr->ovlanm = cpu_to_be16(f->fs.mask.ovlan);
        rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = cpu_to_be16(f->fs.val.lport);
        fwr->lpm = cpu_to_be16(f->fs.mask.lport);
        fwr->fp = cpu_to_be16(f->fs.val.fport);
        fwr->fpm = cpu_to_be16(f->fs.mask.fport);

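        /* FILTER2 work requests additionally carry the swap-MAC flag and
         * the NAT rewrite fields (addresses and ports).
         */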
        if (adapter->params.filter2_wr_support) {
                fwr->filter_type_swapmac =
                         V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
                fwr->natmode_to_ulp_type =
                        V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
                                                 ULP_MODE_TCPDDP :
                                                 ULP_MODE_NONE) |
                        V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
                fwr->newlport = cpu_to_be16(f->fs.nat_lport);
                fwr->newfport = cpu_to_be16(f->fs.nat_fport);
        }

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;
}

/**
 * Set the corresponding entries in the bitmap.
 */
static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
        u32 i;

        t4_os_lock(&t->ftid_lock);
        if (rte_bitmap_get(t->ftid_bmap, fidx)) {
                t4_os_unlock(&t->ftid_lock);
                return -EBUSY;
        }

        for (i = fidx; i < fidx + nentries; i++)
                rte_bitmap_set(t->ftid_bmap, i);
        t4_os_unlock(&t->ftid_lock);
        return 0;
}

/**
 * Clear the corresponding entries in the bitmap.
 */
static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
        u32 i;

        t4_os_lock(&t->ftid_lock);
        for (i = fidx; i < fidx + nentries; i++)
                rte_bitmap_clear(t->ftid_bmap, i);
        t4_os_unlock(&t->ftid_lock);
}

/**
 * Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise.  We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct filter_entry *f;
        unsigned int chip_ver;
        u8 nentries;
        int ret;

        if (is_hashfilter(adapter) && fs->cap)
                return cxgbe_del_hash_filter(dev, filter_id, ctx);

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

        /*
         * Ensure IPv6 filter id is aligned on the 2 slot boundary for T6,
         * and 4 slot boundary for cards below T6.
         */
        if (fs->type == FILTER_TYPE_IPV6) {
                if (chip_ver < CHELSIO_T6)
                        filter_id &= ~(0x3);
                else
                        filter_id &= ~(0x1);
        }

        nentries = cxgbe_filter_slots(adapter, fs->type);
        ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
        if (!ret) {
                dev_warn(adapter, "%s: could not find filter entry: %u\n",
                         __func__, filter_id);
                return -EINVAL;
        }

        f = &adapter->tids.ftid_tab[filter_id];
        ret = writable_filter(f);
        if (ret)
                return ret;

        if (f->valid) {
                f->ctx = ctx;
                cxgbe_clear_ftid(&adapter->tids,
                                 f->tid - adapter->tids.ftid_base,
                                 nentries);
                return del_filter_wr(dev, filter_id);
        }

        /*
         * If the caller has passed in a Completion Context then we need to
         * mark it as a successful completion so they don't stall waiting
         * for it.
         */
        if (ctx) {
                ctx->result = 0;
                t4_complete(&ctx->completion);
        }

        return 0;
}

/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        u8 nentries, bitoff[16] = {0};
        struct filter_entry *f;
        unsigned int chip_ver;
        unsigned int fidx, iq;
        u32 iconf;
        int ret;

        if (is_hashfilter(adapter) && fs->cap)
                return cxgbe_set_hash_filter(dev, fs, ctx);

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

        ret = cxgbe_validate_filter(adapter, fs);
        if (ret)
                return ret;

        /*
         * IPv6 filters occupy four slots and must be aligned on four-slot
         * boundaries for T5. On T6, IPv6 filters occupy two slots and
         * must be aligned on two-slot boundaries.
         *
         * IPv4 filters only occupy a single slot and have no alignment
         * requirements.
         */
        fidx = filter_id;
        if (fs->type == FILTER_TYPE_IPV6) {
                if (chip_ver < CHELSIO_T6)
                        fidx &= ~(0x3);
                else
                        fidx &= ~(0x1);
        }

        if (fidx != filter_id)
                return -EINVAL;

        nentries = cxgbe_filter_slots(adapter, fs->type);
        ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
        if (ret)
                return -EBUSY;

        iq = get_filter_steerq(dev, fs);

        /*
         * Check to make sure that provided filter index is not
         * already in use by someone else
         */
        f = &adapter->tids.ftid_tab[filter_id];
        if (f->valid)
                return -EBUSY;

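        /* Reserve this filter slot (and any extra IPv6 slots) in the ftid
         * bitmap before programming the hardware. The absolute hardware
         * index is ftid_base + filter_id and becomes f->tid below.
         */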
        fidx = adapter->tids.ftid_base + filter_id;
        ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
        if (ret)
                return ret;

        /*
         * Check to make sure the filter requested is writable ...
         */
        ret = writable_filter(f);
        if (ret) {
                /* Clear the bits we have set above */
                cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
                return ret;
        }

        /*
         * Convert the filter specification into our internal format.
         * We copy the PF/VF specification into the Outer VLAN field
         * here so the rest of the code -- including the interface to
         * the firmware -- doesn't have to constantly do these checks.
         */
        f->fs = *fs;
        f->fs.iq = iq;
        f->dev = dev;

        /* Allocate MPS TCAM entry to match Destination MAC. */
        if (f->fs.mask.macidx) {
                int idx;

                idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
                if (idx <= 0) {
                        ret = -ENOMEM;
                        goto free_tid;
                }

                f->fs.val.macidx = idx;
        }

        /* Allocate a clip table entry only if we have a non-zero IPv6
         * address.
         */
        if (chip_ver > CHELSIO_T5 && f->fs.type &&
            memcmp(f->fs.val.lip, bitoff, sizeof(bitoff))) {
                f->clipt = cxgbe_clip_alloc(dev, (u32 *)&f->fs.val.lip);
                if (!f->clipt) {
                        ret = -ENOMEM;
                        goto free_tid;
                }
        }

        /* If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newvlan || f->fs.newdmac) {
                f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
                                                   f->fs.eport, f->fs.dmac);
                if (!f->l2t) {
                        ret = -ENOMEM;
                        goto free_tid;
                }
        }

        /* If the new filter requires Source MAC rewriting then we need to
         * allocate an SMT entry for the filter
         */
        if (f->fs.newsmac) {
                f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
                if (!f->smt) {
                        ret = -ENOMEM;
                        goto free_tid;
                }
        }

        iconf = adapter->params.tp.ingress_config;

        /* Either PFVF or OVLAN can be active, but not both.
         * So, if PFVF is enabled, then overwrite the OVLAN
         * fields with PFVF fields before writing the spec
         * to hardware.
         */
        if (iconf & F_VNIC) {
                f->fs.val.ovlan = fs->val.pf << 13 | fs->val.vf;
                f->fs.mask.ovlan = fs->mask.pf << 13 | fs->mask.vf;
                f->fs.val.ovlan_vld = fs->val.pfvf_vld;
                f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
        }

        /*
         * Attempt to set the filter.  If we don't succeed, we clear
         * it and return the failure.
         */
        f->ctx = ctx;
        f->tid = fidx; /* Save the actual tid */
        ret = set_filter_wr(dev, filter_id);
        if (ret)
                goto free_tid;

        return ret;

free_tid:
        cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
        clear_filter(f);
        return ret;
}

/**
 * Handle a Hash filter write reply.
 */
void cxgbe_hash_filter_rpl(struct adapter *adap,
                           const struct cpl_act_open_rpl *rpl)
{
        struct tid_info *t = &adap->tids;
        struct filter_entry *f;
        struct filter_ctx *ctx = NULL;
        unsigned int tid = GET_TID(rpl);
        unsigned int ftid = G_TID_TID(G_AOPEN_ATID
                                      (be32_to_cpu(rpl->atid_status)));
        unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

        f = lookup_atid(t, ftid);
        if (!f) {
                dev_warn(adap, "%s: could not find filter entry: %d\n",
                         __func__, ftid);
                return;
        }

        ctx = f->ctx;
        f->ctx = NULL;

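        /* On success, migrate the filter from the ATID table to the real
         * TID returned by hardware and program any TCB fields the filter
         * needs (hit counters, MAC/VLAN rewrites). On failure, release the
         * ATID and free the filter, which also releases its SMT, L2T, CLIP
         * and MPS TCAM resources via clear_filter().
         */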
        switch (status) {
        case CPL_ERR_NONE: {
                f->tid = tid;
                f->pending = 0;  /* asynchronous setup completed */
                f->valid = 1;

                cxgbe_insert_tid(t, f, f->tid, 0);
                cxgbe_free_atid(t, ftid);
                if (ctx) {
                        ctx->tid = f->tid;
                        ctx->result = 0;
                }
                if (f->fs.hitcnts)
                        set_tcb_field(adap, tid,
                                      W_TCB_TIMESTAMP,
                                      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
                                      V_TCB_T_RTT_TS_RECENT_AGE
                                              (M_TCB_T_RTT_TS_RECENT_AGE),
                                      V_TCB_TIMESTAMP(0ULL) |
                                      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
                                      1);
                if (f->fs.newdmac)
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_ECE, 1, 1);
                if (f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE)
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
                if (f->fs.newsmac) {
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_CWR, 1, 1);
                        set_tcb_field(adap, tid, W_TCB_SMAC_SEL,
                                      V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
                                      V_TCB_SMAC_SEL(f->smt->hw_idx), 1);
                }
                break;
        }
        default:
                dev_warn(adap, "%s: filter creation failed with status = %u\n",
                         __func__, status);

                if (ctx) {
                        if (status == CPL_ERR_TCAM_FULL)
                                ctx->result = -EAGAIN;
                        else
                                ctx->result = -EINVAL;
                }

                cxgbe_free_atid(t, ftid);
                clear_filter(f);
                t4_os_free(f);
        }

        if (ctx)
                t4_complete(&ctx->completion);
}

/**
 * Handle a LE-TCAM filter write/deletion reply.
 */
void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        struct filter_entry *f = NULL;
        unsigned int tid = GET_TID(rpl);
        int idx, max_fidx = adap->tids.nftids;

        /* Get the corresponding filter entry for this tid */
        if (adap->tids.ftid_tab) {
                /* Check this in normal filter region */
                idx = tid - adap->tids.ftid_base;
                if (idx < 0 || idx >= max_fidx)
                        return;

                f = &adap->tids.ftid_tab[idx];
                if (f->tid != tid)
                        return;
        }

        /* We found the filter entry for this tid */
        if (f) {
                unsigned int ret = G_COOKIE(rpl->cookie);
                struct filter_ctx *ctx;

                /*
                 * Pull off any filter operation context attached to the
                 * filter.
                 */
                ctx = f->ctx;
                f->ctx = NULL;

                if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                        if (ctx) {
                                ctx->tid = f->tid;
                                ctx->result = 0;
                        }
                } else if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /*
                         * Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(f);
                        if (ctx)
                                ctx->result = 0;
                } else {
                        /*
                         * Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_warn(adap, "filter %u setup failed with error %u\n",
                                 idx, ret);
                        clear_filter(f);
                        if (ctx)
                                ctx->result = -EINVAL;
                }

                if (ctx)
                        t4_complete(&ctx->completion);
        }
}

/*
 * Retrieve the packet count for the specified filter.
 */
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
                           u64 *c, int hash, bool get_byte)
{
        struct filter_entry *f;
        unsigned int tcb_base, tcbaddr;
        int ret;

        tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
        if (is_hashfilter(adapter) && hash) {
                if (fidx < adapter->tids.ntids) {
                        f = adapter->tids.tid_tab[fidx];
                        if (!f)
                                return -EINVAL;

                        if (is_t5(adapter->params.chip)) {
                                *c = 0;
                                return 0;
                        }
                        tcbaddr = tcb_base + (fidx * TCB_SIZE);
                        goto get_count;
                } else {
                        return -ERANGE;
                }
        } else {
                if (fidx >= adapter->tids.nftids)
                        return -ERANGE;

                f = &adapter->tids.ftid_tab[fidx];
                if (!f->valid)
                        return -EINVAL;

                tcbaddr = tcb_base + f->tid * TCB_SIZE;
        }

get_count:
        if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
                /*
                 * For T5 and T6, the Filter Packet Hit Count is maintained
                 * as a 32-bit Big Endian value in the TCB field {timestamp}.
                 * Instead of the filter hit count showing up at offset 20
                 * ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually shows
                 * up at offset 24.  Whacky.
                 */
                if (get_byte) {
                        unsigned int word_offset = 4;
                        __be64 be64_byte_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be64_byte_count),
                                           &be64_byte_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = be64_to_cpu(be64_byte_count);
                } else {
                        unsigned int word_offset = 6;
                        __be32 be32_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be32_count), &be32_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = (u64)be32_to_cpu(be32_count);
                }
        }
        return 0;
}

/*
 * Clear the packet count for the specified filter.
 */
int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
                             int hash, bool clear_byte)
{
        u64 tcb_mask = 0, tcb_val = 0;
        struct filter_entry *f = NULL;
        u16 tcb_word = 0;

        if (is_hashfilter(adapter) && hash) {
                if (fidx >= adapter->tids.ntids)
                        return -ERANGE;

                /* No hitcounts supported for T5 hashfilters */
                if (is_t5(adapter->params.chip))
                        return 0;

                f = adapter->tids.tid_tab[fidx];
        } else {
                if (fidx >= adapter->tids.nftids)
                        return -ERANGE;

                f = &adapter->tids.ftid_tab[fidx];
        }

        if (!f || !f->valid)
                return -EINVAL;

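        /* The packet hit count is kept in the TCB {timestamp} word; zero
         * it to clear the count. The byte count additionally occupies the
         * {t_rtt_ts_recent_age}/{t_rtseq_recent} words.
         */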
        tcb_word = W_TCB_TIMESTAMP;
        tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
        tcb_val = V_TCB_TIMESTAMP(0ULL);

        set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);

        if (clear_byte) {
                tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
                tcb_mask =
                        V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
                        V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
                tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
                          V_TCB_T_RTSEQ_RECENT(0ULL);

                set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
        }

        return 0;
}

/**
 * Handle a Hash filter delete reply.
 */
void cxgbe_hash_del_filter_rpl(struct adapter *adap,
                               const struct cpl_abort_rpl_rss *rpl)
{
        struct tid_info *t = &adap->tids;
        struct filter_entry *f;
        struct filter_ctx *ctx = NULL;
        unsigned int tid = GET_TID(rpl);

        f = lookup_tid(t, tid);
        if (!f) {
                dev_warn(adap, "%s: could not find filter entry: %u\n",
                         __func__, tid);
                return;
        }

        ctx = f->ctx;

        clear_filter(f);
        cxgbe_remove_tid(t, 0, tid, 0);
        t4_os_free(f);

        if (ctx) {
                ctx->result = 0;
                t4_complete(&ctx->completion);
        }
}