317830f58b19715d4707e8d7d24400b6cb98aada
[dpdk.git] / drivers / net / cxgbe / cxgbe_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 #include <rte_net.h>
6
7 #include "base/common.h"
8 #include "base/t4_tcb.h"
9 #include "base/t4_regs.h"
10 #include "cxgbe_filter.h"
11 #include "mps_tcam.h"
12 #include "clip_tbl.h"
13 #include "l2t.h"
14 #include "smt.h"
15
/**
 * Initialize Hash Filters
 *
 * Queries the firmware for the number of TIDs available for hash filters,
 * derives the number of active-open TIDs from it, and marks hash-filter
 * support on the adapter.  Returns 0 on success or a negative error code
 * if the firmware query fails.
 */
int cxgbe_init_hash_filter(struct adapter *adap)
{
	unsigned int n_user_filters;
	unsigned int user_filter_perc;
	int ret;
	u32 params[7], val[7];

/* Helpers to compose firmware parameter queries for device-global and
 * per-PF/VF parameters respectively.  Note: intentionally not #undef'd,
 * to remain usable further down the file.
 */
#define FW_PARAM_DEV(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
	V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) |  \
	V_FW_PARAMS_PARAM_Y(0) | \
	V_FW_PARAMS_PARAM_Z(0))

	/* Ask firmware how many TIDs the hash-filter region supports. */
	params[0] = FW_PARAM_DEV(NTID);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
			      params, val);
	if (ret < 0)
		return ret;
	adap->tids.ntids = val[0];
	/* Reserve half the TIDs (capped at MAX_ATIDS) for active opens. */
	adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

	/* All (100%) of the LE-TCAM filter IDs are given to the user. */
	user_filter_perc = 100;
	n_user_filters = mult_frac(adap->tids.nftids,
				   user_filter_perc,
				   100);

	adap->tids.nftids = n_user_filters;
	adap->params.hash_filter = 1;
	return 0;
}
53
/**
 * Validate if the requested filter specification can be set by checking
 * if the requested features have been enabled
 *
 * Returns 0 if the specification is supported, -EOPNOTSUPP if it needs a
 * match field or capability the hardware was not configured with, or
 * -ERANGE for out-of-range port numbers.
 */
int cxgbe_validate_filter(struct adapter *adapter,
			  struct ch_filter_specification *fs)
{
	u32 fconf, iconf;

	/*
	 * Check for unconfigured fields being used.
	 * Hash (cap) filters are checked against the filter mask; LE-TCAM
	 * filters against the compressed filter tuple (vlan_pri_map).
	 */
	fconf = fs->cap ? adapter->params.tp.filter_mask :
			  adapter->params.tp.vlan_pri_map;

	iconf = adapter->params.tp.ingress_config;

/* S(field): the request uses this field (value or mask non-zero).
 * U(mask, field): the field is used but absent from the hardware tuple.
 */
#define S(_field) \
	(fs->val._field || fs->mask._field)
#define U(_mask, _field) \
	(!(fconf & (_mask)) && S(_field))

	if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
	    U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
	    U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) ||
	    U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld))
		return -EOPNOTSUPP;

	/* Either OVLAN or PFVF match is enabled in hardware, but not both */
	if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
	    (S(ovlan_vld) && (iconf & F_VNIC)))
		return -EOPNOTSUPP;

	/* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */
	if ((S(ovlan_vld) && (iconf & F_USE_ENC_IDX)) ||
	    (S(pfvf_vld) && (iconf & F_USE_ENC_IDX)))
		return -EOPNOTSUPP;

#undef S
#undef U

	/*
	 * If the user is requesting that the filter action loop
	 * matching packets back out one of our ports, make sure that
	 * the egress port is in range.
	 */
	if (fs->action == FILTER_SWITCH &&
	    fs->eport >= adapter->params.nports)
		return -ERANGE;

	/*
	 * Don't allow various trivially obvious bogus out-of-range
	 * values ...
	 */
	if (fs->val.iport >= adapter->params.nports)
		return -ERANGE;

	/* NAT rewriting and MAC swapping on LE-TCAM filters require
	 * FW_FILTER2_WR support in the firmware.
	 */
	if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
		return -EOPNOTSUPP;

	if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
		return -EOPNOTSUPP;

	return 0;
}
119
120 /**
121  * Get the queue to which the traffic must be steered to.
122  */
123 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
124                                       struct ch_filter_specification *fs)
125 {
126         struct port_info *pi = ethdev2pinfo(dev);
127         struct adapter *adapter = pi->adapter;
128         unsigned int iq;
129
130         /*
131          * If the user has requested steering matching Ingress Packets
132          * to a specific Queue Set, we need to make sure it's in range
133          * for the port and map that into the Absolute Queue ID of the
134          * Queue Set's Response Queue.
135          */
136         if (!fs->dirsteer) {
137                 iq = 0;
138         } else {
139                 /*
140                  * If the iq id is greater than the number of qsets,
141                  * then assume it is an absolute qid.
142                  */
143                 if (fs->iq < pi->n_rx_qsets)
144                         iq = adapter->sge.ethrxq[pi->first_qset +
145                                                  fs->iq].rspq.abs_id;
146                 else
147                         iq = fs->iq;
148         }
149
150         return iq;
151 }
152
153 /* Return an error number if the indicated filter isn't writable ... */
154 static int writable_filter(struct filter_entry *f)
155 {
156         if (f->locked)
157                 return -EPERM;
158         if (f->pending)
159                 return -EBUSY;
160
161         return 0;
162 }
163
164 /**
165  * Send CPL_SET_TCB_FIELD message
166  */
167 static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
168                           u16 word, u64 mask, u64 val, int no_reply)
169 {
170         struct rte_mbuf *mbuf;
171         struct cpl_set_tcb_field *req;
172         struct sge_ctrl_txq *ctrlq;
173
174         ctrlq = &adapter->sge.ctrlq[0];
175         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
176         WARN_ON(!mbuf);
177
178         mbuf->data_len = sizeof(*req);
179         mbuf->pkt_len = mbuf->data_len;
180
181         req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
182         memset(req, 0, sizeof(*req));
183         INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
184         req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
185                                       V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
186                                       V_NO_REPLY(no_reply));
187         req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
188         req->mask = cpu_to_be64(mask);
189         req->val = cpu_to_be64(val);
190
191         t4_mgmt_tx(ctrlq, mbuf);
192 }
193
194 /**
195  * Set one of the t_flags bits in the TCB.
196  */
197 static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
198                           unsigned int bit_pos, unsigned int val, int no_reply)
199 {
200         set_tcb_field(adap, ftid,  W_TCB_T_FLAGS, 1ULL << bit_pos,
201                       (unsigned long long)val << bit_pos, no_reply);
202 }
203
/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 *
 * The ULP_TX_PKT header and the immediate-data sub-command are laid over
 * the work-request-header space at the start of @req (the immediate
 * length below excludes sizeof(struct work_request_hdr)); a trailing
 * NOOP sub-command follows the CPL to pad out the command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
					struct cpl_set_tcb_field *req,
					unsigned int word,
					u64 mask, u64 val, u8 cookie,
					int no_reply)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* ULP length is expressed in 16-byte units. */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
				      V_QUEUENO(0));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	/* Terminate with a NOOP immediately after the CPL. */
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
231
232 /**
233  * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
234  * IPv4 requires only 1 slot on all cards.
235  */
236 u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
237 {
238         if (family == FILTER_TYPE_IPV6) {
239                 if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
240                         return 4;
241
242                 return 2;
243         }
244
245         return 1;
246 }
247
248 /**
249  * Check if entries are already filled.
250  */
251 bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
252 {
253         bool result = FALSE;
254         u32 i;
255
256         /* Ensure there's enough slots available. */
257         t4_os_lock(&t->ftid_lock);
258         for (i = fidx; i < fidx + nentries; i++) {
259                 if (rte_bitmap_get(t->ftid_bmap, i)) {
260                         result = TRUE;
261                         break;
262                 }
263         }
264         t4_os_unlock(&t->ftid_lock);
265         return result;
266 }
267
268 /**
269  * Allocate available free entries.
270  */
271 int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
272 {
273         struct tid_info *t = &adap->tids;
274         int pos;
275         int size = t->nftids;
276
277         t4_os_lock(&t->ftid_lock);
278         if (nentries > 1)
279                 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
280                                                     nentries);
281         else
282                 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
283         t4_os_unlock(&t->ftid_lock);
284
285         return pos < size ? pos : -1;
286 }
287
288 /**
289  * Clear a filter and release any of its resources that we own.  This also
290  * clears the filter's "pending" status.
291  */
292 static void clear_filter(struct filter_entry *f)
293 {
294         struct port_info *pi = ethdev2pinfo(f->dev);
295
296         if (f->clipt)
297                 cxgbe_clip_release(f->dev, f->clipt);
298
299         if (f->l2t)
300                 cxgbe_l2t_release(f->l2t);
301
302         if (f->fs.mask.macidx)
303                 cxgbe_mpstcam_remove(pi, f->fs.val.macidx);
304
305         /* The zeroing of the filter rule below clears the filter valid,
306          * pending, locked flags etc. so it's all we need for
307          * this operation.
308          */
309         memset(f, 0, sizeof(*f));
310 }
311
/**
 * Construct hash filter ntuple.
 *
 * Packs the requested match fields into the Compressed Filter Tuple at
 * the bit offsets the TP engine was configured with.  A field only
 * participates when its TP shift is valid (>= 0) and, for most fields,
 * when the caller asked to match on it (mask non-zero).
 */
static u64 hash_filter_ntuple(const struct filter_entry *f)
{
	struct adapter *adap = ethdev2adap(f->dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;
	u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */

	if (tp->port_shift >= 0 && f->fs.mask.iport)
		ntuple |= (u64)f->fs.val.iport << tp->port_shift;

	/* Protocol always participates when configured; default to TCP
	 * when the caller did not specify one.
	 */
	if (tp->protocol_shift >= 0) {
		if (!f->fs.val.proto)
			ntuple |= (u64)tcp_proto << tp->protocol_shift;
		else
			ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
	}

	if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
		ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
	if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
		ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
	/* Inner VLAN match also requires the VLAN-valid flag in the tuple. */
	if (tp->vlan_shift >= 0 && f->fs.mask.ivlan)
		ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
			  tp->vlan_shift;
	/* The VNIC slot carries either PF/VF or outer-VLAN info, selected
	 * by F_VNIC in ingress_config; the two encodings are exclusive.
	 */
	if (tp->vnic_shift >= 0) {
		if ((adap->params.tp.ingress_config & F_VNIC) &&
		    f->fs.mask.pfvf_vld)
			ntuple |= (u64)(f->fs.val.pfvf_vld << 16 |
					f->fs.val.pf << 13 | f->fs.val.vf) <<
					tp->vnic_shift;
		else if (!(adap->params.tp.ingress_config & F_VNIC) &&
			 f->fs.mask.ovlan_vld)
			ntuple |= (u64)(f->fs.val.ovlan_vld << 16 |
					f->fs.val.ovlan) << tp->vnic_shift;
	}
	if (tp->tos_shift >= 0 && f->fs.mask.tos)
		ntuple |= (u64)f->fs.val.tos << tp->tos_shift;

	return ntuple;
}
355
356 /**
357  * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
358  */
359 static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
360                              unsigned int tid)
361 {
362         struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
363         struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
364
365         txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
366                                       V_ULP_TXPKT_DEST(0));
367         txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
368         sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
369         sc->len = cpu_to_be32(sizeof(*abort_req) -
370                               sizeof(struct work_request_hdr));
371         OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
372         abort_req->rsvd0 = cpu_to_be32(0);
373         abort_req->rsvd1 = 0;
374         abort_req->cmd = CPL_ABORT_NO_RST;
375         sc = (struct ulptx_idata *)(abort_req + 1);
376         sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
377         sc->len = cpu_to_be32(0);
378 }
379
/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 *
 * Mirrors mk_abort_req_ulp(): the ULP header and immediate-data
 * sub-command overlay the work-request-header space, the CPL proper
 * follows, and a NOOP sub-command terminates the command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* ULP length is expressed in 16-byte units. */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	sc->len = cpu_to_be32(sizeof(*abort_rpl) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = cpu_to_be32(0);
	abort_rpl->rsvd1 = 0;
	abort_rpl->cmd = CPL_ABORT_NO_RST;
	/* Terminate with a NOOP immediately after the CPL. */
	sc = (struct ulptx_idata *)(abort_rpl + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
403
404 /**
405  * Delete the specified hash filter.
406  */
407 static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
408                                  unsigned int filter_id,
409                                  struct filter_ctx *ctx)
410 {
411         struct adapter *adapter = ethdev2adap(dev);
412         struct tid_info *t = &adapter->tids;
413         struct filter_entry *f;
414         struct sge_ctrl_txq *ctrlq;
415         unsigned int port_id = ethdev2pinfo(dev)->port_id;
416         int ret;
417
418         if (filter_id > adapter->tids.ntids)
419                 return -E2BIG;
420
421         f = lookup_tid(t, filter_id);
422         if (!f) {
423                 dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
424                         __func__, filter_id);
425                 return -EINVAL;
426         }
427
428         ret = writable_filter(f);
429         if (ret)
430                 return ret;
431
432         if (f->valid) {
433                 unsigned int wrlen;
434                 struct rte_mbuf *mbuf;
435                 struct work_request_hdr *wr;
436                 struct ulptx_idata *aligner;
437                 struct cpl_set_tcb_field *req;
438                 struct cpl_abort_req *abort_req;
439                 struct cpl_abort_rpl *abort_rpl;
440
441                 f->ctx = ctx;
442                 f->pending = 1;
443
444                 wrlen = cxgbe_roundup(sizeof(*wr) +
445                                       (sizeof(*req) + sizeof(*aligner)) +
446                                       sizeof(*abort_req) + sizeof(*abort_rpl),
447                                       16);
448
449                 ctrlq = &adapter->sge.ctrlq[port_id];
450                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
451                 if (!mbuf) {
452                         dev_err(adapter, "%s: could not allocate skb ..\n",
453                                 __func__);
454                         goto out_err;
455                 }
456
457                 mbuf->data_len = wrlen;
458                 mbuf->pkt_len = mbuf->data_len;
459
460                 req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
461                 INIT_ULPTX_WR(req, wrlen, 0, 0);
462                 wr = (struct work_request_hdr *)req;
463                 wr++;
464                 req = (struct cpl_set_tcb_field *)wr;
465                 mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
466                                 V_TCB_RSS_INFO(M_TCB_RSS_INFO),
467                                 V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
468                                 0, 1);
469                 aligner = (struct ulptx_idata *)(req + 1);
470                 abort_req = (struct cpl_abort_req *)(aligner + 1);
471                 mk_abort_req_ulp(abort_req, f->tid);
472                 abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
473                 mk_abort_rpl_ulp(abort_rpl, f->tid);
474                 t4_mgmt_tx(ctrlq, mbuf);
475         }
476         return 0;
477
478 out_err:
479         return -ENOMEM;
480 }
481
/**
 * Build a ACT_OPEN_REQ6 message for setting IPv6 hash filter.
 *
 * Only T6 chips are supported here; on other chips the request is left
 * untouched and an error is logged.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *req = NULL;
	u64 local_lo, local_hi, peer_lo, peer_hi;
	/* NOTE(review): lip/fip byte arrays are reinterpreted as u32 words
	 * and copied without byte swapping — presumably already stored in
	 * network byte order; confirm against the filter-spec producer.
	 */
	u32 *lip = (u32 *)f->fs.val.lip;
	u32 *fip = (u32 *)f->fs.val.fip;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

	/* Split the 128-bit addresses into high/low 64-bit halves. */
	local_hi = ((u64)lip[1]) << 32 | lip[0];
	local_lo = ((u64)lip[3]) << 32 | lip[2];
	peer_hi = ((u64)fip[1]) << 32 | fip[0];
	peer_lo = ((u64)fip[3]) << 32 | fip[2];

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = local_hi;
	req->local_ip_lo = local_lo;
	req->peer_ip_hi = peer_hi;
	req->peer_ip_lo = peer_lo;
	/* opt0/opt2 encode the filter actions: VLAN rewrite, hit counters,
	 * L2T index for switching, egress channel, drop/steer behavior.
	 */
	req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				V_DELACK(f->fs.hitcnts) |
				V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_TX_CHAN(f->fs.eport) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
			    V_RSS_QUEUE(f->fs.iq) |
			    F_T5_OPT_2_VALID |
			    F_RX_CHANNEL |
			    V_SACK_EN(f->fs.swapmac) |
			    V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					 (f->fs.dirsteer << 1)) |
			    V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}
536
537 /**
538  * Build a ACT_OPEN_REQ message for setting IPv4 hash filter.
539  */
540 static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
541                             unsigned int qid_filterid, struct adapter *adap)
542 {
543         struct cpl_t6_act_open_req *req = NULL;
544
545         switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
546         case CHELSIO_T6:
547                 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
548
549                 INIT_TP_WR(req, 0);
550                 break;
551         default:
552                 dev_err(adap, "%s: unsupported chip type!\n", __func__);
553                 return;
554         }
555
556         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
557                                                     qid_filterid));
558         req->local_port = cpu_to_be16(f->fs.val.lport);
559         req->peer_port = cpu_to_be16(f->fs.val.fport);
560         req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
561                         f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
562         req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
563                         f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
564         req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
565                                         f->fs.newvlan == VLAN_REWRITE) |
566                                 V_DELACK(f->fs.hitcnts) |
567                                 V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
568                                 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
569                                            << 1) |
570                                 V_TX_CHAN(f->fs.eport) |
571                                 V_ULP_MODE(ULP_MODE_NONE) |
572                                 F_TCAM_BYPASS | F_NON_OFFLOAD);
573         req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
574         req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
575                             V_RSS_QUEUE(f->fs.iq) |
576                             F_T5_OPT_2_VALID |
577                             F_RX_CHANNEL |
578                             V_SACK_EN(f->fs.swapmac) |
579                             V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
580                                          (f->fs.dirsteer << 1)) |
581                             V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
582 }
583
584 /**
585  * Set the specified hash filter.
586  */
587 static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
588                                  struct ch_filter_specification *fs,
589                                  struct filter_ctx *ctx)
590 {
591         struct port_info *pi = ethdev2pinfo(dev);
592         struct adapter *adapter = pi->adapter;
593         struct tid_info *t = &adapter->tids;
594         struct filter_entry *f;
595         struct rte_mbuf *mbuf;
596         struct sge_ctrl_txq *ctrlq;
597         unsigned int iq;
598         int atid, size;
599         int ret = 0;
600
601         ret = cxgbe_validate_filter(adapter, fs);
602         if (ret)
603                 return ret;
604
605         iq = get_filter_steerq(dev, fs);
606
607         ctrlq = &adapter->sge.ctrlq[pi->port_id];
608
609         f = t4_os_alloc(sizeof(*f));
610         if (!f)
611                 return -ENOMEM;
612
613         f->fs = *fs;
614         f->ctx = ctx;
615         f->dev = dev;
616         f->fs.iq = iq;
617
618         /* Allocate MPS TCAM entry to match Destination MAC. */
619         if (f->fs.mask.macidx) {
620                 int idx;
621
622                 idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
623                 if (idx <= 0) {
624                         ret = -ENOMEM;
625                         goto out_err;
626                 }
627
628                 f->fs.val.macidx = idx;
629         }
630
631         /*
632          * If the new filter requires loopback Destination MAC and/or VLAN
633          * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
634          * the filter.
635          */
636         if (f->fs.newdmac || f->fs.newvlan == VLAN_INSERT ||
637             f->fs.newvlan == VLAN_REWRITE) {
638                 /* allocate L2T entry for new filter */
639                 f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
640                                                    f->fs.eport, f->fs.dmac);
641                 if (!f->l2t) {
642                         ret = -ENOMEM;
643                         goto out_err;
644                 }
645         }
646
647         /* If the new filter requires Source MAC rewriting then we need to
648          * allocate a SMT entry for the filter
649          */
650         if (f->fs.newsmac) {
651                 f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
652                 if (!f->smt) {
653                         ret = -EAGAIN;
654                         goto out_err;
655                 }
656         }
657
658         atid = cxgbe_alloc_atid(t, f);
659         if (atid < 0)
660                 goto out_err;
661
662         if (f->fs.type == FILTER_TYPE_IPV6) {
663                 /* IPv6 hash filter */
664                 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
665                 if (!f->clipt)
666                         goto free_atid;
667
668                 size = sizeof(struct cpl_t6_act_open_req6);
669                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
670                 if (!mbuf) {
671                         ret = -ENOMEM;
672                         goto free_atid;
673                 }
674
675                 mbuf->data_len = size;
676                 mbuf->pkt_len = mbuf->data_len;
677
678                 mk_act_open_req6(f, mbuf,
679                                  ((adapter->sge.fw_evtq.abs_id << 14) | atid),
680                                  adapter);
681         } else {
682                 /* IPv4 hash filter */
683                 size = sizeof(struct cpl_t6_act_open_req);
684                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
685                 if (!mbuf) {
686                         ret = -ENOMEM;
687                         goto free_atid;
688                 }
689
690                 mbuf->data_len = size;
691                 mbuf->pkt_len = mbuf->data_len;
692
693                 mk_act_open_req(f, mbuf,
694                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
695                                 adapter);
696         }
697
698         f->pending = 1;
699         t4_mgmt_tx(ctrlq, mbuf);
700         return 0;
701
702 free_atid:
703         cxgbe_free_atid(t, atid);
704
705 out_err:
706         clear_filter(f);
707         t4_os_free(f);
708         return ret;
709 }
710
711 /**
712  * t4_mk_filtdelwr - create a delete filter WR
713  * @adap: adapter context
714  * @ftid: the filter ID
715  * @wr: the filter work request to populate
716  * @qid: ingress queue to receive the delete notification
717  *
718  * Creates a filter work request to delete the supplied filter.  If @qid is
719  * negative the delete notification is suppressed.
720  */
721 static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
722                             struct fw_filter2_wr *wr, int qid)
723 {
724         memset(wr, 0, sizeof(*wr));
725         if (adap->params.filter2_wr_support)
726                 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
727         else
728                 wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
729         wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
730         wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
731                                     V_FW_FILTER_WR_NOREPLY(qid < 0));
732         wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
733         if (qid >= 0)
734                 wr->rx_chan_rx_rpl_iq =
735                                 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
736 }
737
738 /**
739  * Create FW work request to delete the filter at a specified index
740  */
741 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
742 {
743         struct adapter *adapter = ethdev2adap(dev);
744         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
745         struct rte_mbuf *mbuf;
746         struct fw_filter2_wr *fwr;
747         struct sge_ctrl_txq *ctrlq;
748         unsigned int port_id = ethdev2pinfo(dev)->port_id;
749
750         ctrlq = &adapter->sge.ctrlq[port_id];
751         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
752         if (!mbuf)
753                 return -ENOMEM;
754
755         mbuf->data_len = sizeof(*fwr);
756         mbuf->pkt_len = mbuf->data_len;
757
758         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
759         t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);
760
761         /*
762          * Mark the filter as "pending" and ship off the Filter Work Request.
763          * When we get the Work Request Reply we'll clear the pending status.
764          */
765         f->pending = 1;
766         t4_mgmt_tx(ctrlq, mbuf);
767         return 0;
768 }
769
770 static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
771 {
772         struct adapter *adapter = ethdev2adap(dev);
773         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
774         struct rte_mbuf *mbuf;
775         struct fw_filter2_wr *fwr;
776         struct sge_ctrl_txq *ctrlq;
777         unsigned int port_id = ethdev2pinfo(dev)->port_id;
778         int ret;
779
780         /* If the new filter requires Source MAC rewriting then we need to
781          * allocate a SMT entry for the filter
782          */
783         if (f->fs.newsmac) {
784                 f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
785                 if (!f->smt) {
786                         if (f->l2t) {
787                                 cxgbe_l2t_release(f->l2t);
788                                 f->l2t = NULL;
789                         }
790                         return -ENOMEM;
791                 }
792         }
793
794         ctrlq = &adapter->sge.ctrlq[port_id];
795         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
796         if (!mbuf) {
797                 ret = -ENOMEM;
798                 goto out;
799         }
800
801         mbuf->data_len = sizeof(*fwr);
802         mbuf->pkt_len = mbuf->data_len;
803
804         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
805         memset(fwr, 0, sizeof(*fwr));
806
807         /*
808          * Construct the work request to set the filter.
809          */
810         if (adapter->params.filter2_wr_support)
811                 fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
812         else
813                 fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
814         fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
815         fwr->tid_to_iq =
816                 cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
817                             V_FW_FILTER_WR_RQTYPE(f->fs.type) |
818                             V_FW_FILTER_WR_NOREPLY(0) |
819                             V_FW_FILTER_WR_IQ(f->fs.iq));
820         fwr->del_filter_to_l2tix =
821                 cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
822                             V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
823                             V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
824                             V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
825                             V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
826                             V_FW_FILTER_WR_INSVLAN
827                                 (f->fs.newvlan == VLAN_INSERT ||
828                                  f->fs.newvlan == VLAN_REWRITE) |
829                             V_FW_FILTER_WR_RMVLAN
830                                 (f->fs.newvlan == VLAN_REMOVE ||
831                                  f->fs.newvlan == VLAN_REWRITE) |
832                             V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
833                             V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
834                             V_FW_FILTER_WR_PRIO(f->fs.prio) |
835                             V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
836         fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
837         fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
838         fwr->frag_to_ovlan_vldm =
839                 (V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
840                  V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
841                  V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
842                  V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
843         fwr->smac_sel = f->smt ? f->smt->hw_idx : 0;
844         fwr->rx_chan_rx_rpl_iq =
845                 cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
846                             V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
847                                                      ));
848         fwr->maci_to_matchtypem =
849                 cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
850                             V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
851                             V_FW_FILTER_WR_PORT(f->fs.val.iport) |
852                             V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
853         fwr->ptcl = f->fs.val.proto;
854         fwr->ptclm = f->fs.mask.proto;
855         fwr->ttyp = f->fs.val.tos;
856         fwr->ttypm = f->fs.mask.tos;
857         fwr->ivlan = cpu_to_be16(f->fs.val.ivlan);
858         fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan);
859         fwr->ovlan = cpu_to_be16(f->fs.val.ovlan);
860         fwr->ovlanm = cpu_to_be16(f->fs.mask.ovlan);
861         rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
862         rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
863         rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
864         rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
865         fwr->lp = cpu_to_be16(f->fs.val.lport);
866         fwr->lpm = cpu_to_be16(f->fs.mask.lport);
867         fwr->fp = cpu_to_be16(f->fs.val.fport);
868         fwr->fpm = cpu_to_be16(f->fs.mask.fport);
869
870         if (adapter->params.filter2_wr_support) {
871                 fwr->filter_type_swapmac =
872                          V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
873                 fwr->natmode_to_ulp_type =
874                         V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
875                                                  ULP_MODE_TCPDDP :
876                                                  ULP_MODE_NONE) |
877                         V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
878                 memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
879                 memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
880                 fwr->newlport = cpu_to_be16(f->fs.nat_lport);
881                 fwr->newfport = cpu_to_be16(f->fs.nat_fport);
882         }
883
884         /*
885          * Mark the filter as "pending" and ship off the Filter Work Request.
886          * When we get the Work Request Reply we'll clear the pending status.
887          */
888         f->pending = 1;
889         t4_mgmt_tx(ctrlq, mbuf);
890         return 0;
891
892 out:
893         return ret;
894 }
895
896 /**
897  * Set the corresponding entries in the bitmap.
898  */
899 static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
900 {
901         u32 i;
902
903         t4_os_lock(&t->ftid_lock);
904         if (rte_bitmap_get(t->ftid_bmap, fidx)) {
905                 t4_os_unlock(&t->ftid_lock);
906                 return -EBUSY;
907         }
908
909         for (i = fidx; i < fidx + nentries; i++)
910                 rte_bitmap_set(t->ftid_bmap, i);
911         t4_os_unlock(&t->ftid_lock);
912         return 0;
913 }
914
915 /**
916  * Clear the corresponding entries in the bitmap.
917  */
918 static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
919 {
920         u32 i;
921
922         t4_os_lock(&t->ftid_lock);
923         for (i = fidx; i < fidx + nentries; i++)
924                 rte_bitmap_clear(t->ftid_bmap, i);
925         t4_os_unlock(&t->ftid_lock);
926 }
927
928 /**
929  * Check a delete filter request for validity and send it to the hardware.
930  * Return 0 on success, an error number otherwise.  We attach any provided
931  * filter operation context to the internal filter specification in order to
932  * facilitate signaling completion of the operation.
933  */
934 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
935                      struct ch_filter_specification *fs,
936                      struct filter_ctx *ctx)
937 {
938         struct port_info *pi = dev->data->dev_private;
939         struct adapter *adapter = pi->adapter;
940         struct filter_entry *f;
941         unsigned int chip_ver;
942         u8 nentries;
943         int ret;
944
945         if (is_hashfilter(adapter) && fs->cap)
946                 return cxgbe_del_hash_filter(dev, filter_id, ctx);
947
948         if (filter_id >= adapter->tids.nftids)
949                 return -ERANGE;
950
951         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
952
953         /*
954          * Ensure IPv6 filter id is aligned on the 2 slot boundary for T6,
955          * and 4 slot boundary for cards below T6.
956          */
957         if (fs->type == FILTER_TYPE_IPV6) {
958                 if (chip_ver < CHELSIO_T6)
959                         filter_id &= ~(0x3);
960                 else
961                         filter_id &= ~(0x1);
962         }
963
964         nentries = cxgbe_filter_slots(adapter, fs->type);
965         ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
966         if (!ret) {
967                 dev_warn(adap, "%s: could not find filter entry: %u\n",
968                          __func__, filter_id);
969                 return -EINVAL;
970         }
971
972         f = &adapter->tids.ftid_tab[filter_id];
973         ret = writable_filter(f);
974         if (ret)
975                 return ret;
976
977         if (f->valid) {
978                 f->ctx = ctx;
979                 cxgbe_clear_ftid(&adapter->tids,
980                                  f->tid - adapter->tids.ftid_base,
981                                  nentries);
982                 return del_filter_wr(dev, filter_id);
983         }
984
985         /*
986          * If the caller has passed in a Completion Context then we need to
987          * mark it as a successful completion so they don't stall waiting
988          * for it.
989          */
990         if (ctx) {
991                 ctx->result = 0;
992                 t4_complete(&ctx->completion);
993         }
994
995         return 0;
996 }
997
/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 *
 * @dev: ethernet device the filter belongs to
 * @filter_id: index into the adapter's LE-TCAM filter table
 * @fs: requested filter specification
 * @ctx: optional completion context, signalled from cxgbe_filter_rpl()
 *       when the firmware acknowledges the work request
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	u8 nentries, bitoff[16] = {0};	/* all-zero pattern: "no IPv6 addr" */
	struct filter_entry *f;
	unsigned int chip_ver;
	unsigned int fidx, iq;
	u32 iconf;
	int ret;

	/* Hash (maskless) filters follow a completely different path. */
	if (is_hashfilter(adapter) && fs->cap)
		return cxgbe_set_hash_filter(dev, fs, ctx);

	if (filter_id >= adapter->tids.nftids)
		return -ERANGE;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

	ret = cxgbe_validate_filter(adapter, fs);
	if (ret)
		return ret;

	/*
	 * IPv6 filters occupy four slots and must be aligned on four-slot
	 * boundaries for T5. On T6, IPv6 filters occupy two-slots and
	 * must be aligned on two-slot boundaries.
	 *
	 * IPv4 filters only occupy a single slot and have no alignment
	 * requirements.
	 */
	fidx = filter_id;
	if (fs->type == FILTER_TYPE_IPV6) {
		if (chip_ver < CHELSIO_T6)
			fidx &= ~(0x3);
		else
			fidx &= ~(0x1);
	}

	/* Unlike deletion, a misaligned id is rejected here rather than
	 * silently rounded down.
	 */
	if (fidx != filter_id)
		return -EINVAL;

	nentries = cxgbe_filter_slots(adapter, fs->type);
	ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
	if (ret)
		return -EBUSY;

	iq = get_filter_steerq(dev, fs);

	/*
	 * Check to make sure that provided filter index is not
	 * already in use by someone else
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	/* Reserve the bitmap slots first; every failure after this point
	 * must unwind through free_tid below.
	 */
	fidx = adapter->tids.ftid_base + filter_id;
	ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
	if (ret)
		return ret;

	/*
	 * Check to make sure the filter requested is writable ...
	 */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
		return ret;
	}

	/*
	 * Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	/* Allocate MPS TCAM entry to match Destination MAC. */
	if (f->fs.mask.macidx) {
		int idx;

		/* NOTE(review): index 0 is treated as a failure here along
		 * with negative returns — presumably 0 is reserved; confirm
		 * against cxgbe_mpstcam_alloc().
		 */
		idx = cxgbe_mpstcam_alloc(pi, f->fs.val.dmac, f->fs.mask.dmac);
		if (idx <= 0) {
			ret = -ENOMEM;
			goto free_tid;
		}

		f->fs.val.macidx = idx;
	}

	/* Allocate a clip table entry only if we have non-zero IPv6 address. */
	if (chip_ver > CHELSIO_T5 && f->fs.type &&
	    memcmp(f->fs.val.lip, bitoff, sizeof(bitoff))) {
		f->clipt = cxgbe_clip_alloc(dev, (u32 *)&f->fs.val.lip);
		if (!f->clipt) {
			ret = -ENOMEM;
			goto free_tid;
		}
	}

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newvlan || f->fs.newdmac) {
		f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
						   f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto free_tid;
		}
	}

	iconf = adapter->params.tp.ingress_config;

	/* Either PFVF or OVLAN can be active, but not both
	 * So, if PFVF is enabled, then overwrite the OVLAN
	 * fields with PFVF fields before writing the spec
	 * to hardware.
	 */
	if (iconf & F_VNIC) {
		f->fs.val.ovlan = fs->val.pf << 13 | fs->val.vf;
		f->fs.mask.ovlan = fs->mask.pf << 13 | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	}

	/*
	 * Attempt to set the filter.  If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(dev, filter_id);
	if (ret)
		goto free_tid;

	return ret;

free_tid:
	/* Undo the bitmap reservation and release any MPS/CLIP/L2T
	 * resources attached to the entry.
	 */
	cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
	clear_filter(f);
	return ret;
}
1156
/**
 * Handle a Hash filter write reply.
 *
 * Called when the firmware answers the active-open request that installed
 * a hash (maskless) filter.  On success the entry is moved from the atid
 * table to the tid table and the TCB fields for hit counting and header
 * rewriting are programmed.  On failure the entry is torn down and freed.
 * Any attached completion context is signalled exactly once at the end.
 */
void cxgbe_hash_filter_rpl(struct adapter *adap,
			   const struct cpl_act_open_rpl *rpl)
{
	struct tid_info *t = &adap->tids;
	struct filter_entry *f;
	struct filter_ctx *ctx = NULL;
	unsigned int tid = GET_TID(rpl);
	/* The atid we allocated at submit time and the firmware status are
	 * both packed into rpl->atid_status.
	 */
	unsigned int ftid = G_TID_TID(G_AOPEN_ATID
				      (be32_to_cpu(rpl->atid_status)));
	unsigned int status  = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

	f = lookup_atid(t, ftid);
	if (!f) {
		dev_warn(adap, "%s: could not find filter entry: %d\n",
			 __func__, ftid);
		return;
	}

	/* Detach the completion context before touching the entry. */
	ctx = f->ctx;
	f->ctx = NULL;

	switch (status) {
	case CPL_ERR_NONE: {
		f->tid = tid;
		f->pending = 0;  /* asynchronous setup completed */
		f->valid = 1;

		/* Promote the entry from the atid table to the tid table. */
		cxgbe_insert_tid(t, f, f->tid, 0);
		cxgbe_free_atid(t, ftid);
		if (ctx) {
			ctx->tid = f->tid;
			ctx->result = 0;
		}
		/* Zero the TCB timestamp/RTT-age words, which hold the
		 * filter hit counters when hit counting is enabled.
		 */
		if (f->fs.hitcnts)
			set_tcb_field(adap, tid,
				      W_TCB_TIMESTAMP,
				      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
				      V_TCB_T_RTT_TS_RECENT_AGE
					      (M_TCB_T_RTT_TS_RECENT_AGE),
				      V_TCB_TIMESTAMP(0ULL) |
				      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
				      1);
		/* NOTE(review): in filter mode these TCP control-block flags
		 * appear to be overloaded to enable DMAC/VLAN/SMAC rewrite —
		 * confirm against the T6 TCB documentation.
		 */
		if (f->fs.newdmac)
			set_tcb_tflag(adap, tid, S_TF_CCTRL_ECE, 1, 1);
		if (f->fs.newvlan == VLAN_INSERT ||
		    f->fs.newvlan == VLAN_REWRITE)
			set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
		if (f->fs.newsmac) {
			set_tcb_tflag(adap, tid, S_TF_CCTRL_CWR, 1, 1);
			set_tcb_field(adap, tid, W_TCB_SMAC_SEL,
				      V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
				      V_TCB_SMAC_SEL(f->smt->hw_idx), 1);
		}
		break;
	}
	default:
		dev_warn(adap, "%s: filter creation failed with status = %u\n",
			 __func__, status);

		if (ctx) {
			/* TCAM full is retryable; everything else is not. */
			if (status == CPL_ERR_TCAM_FULL)
				ctx->result = -EAGAIN;
			else
				ctx->result = -EINVAL;
		}

		/* Failed install: release the atid and free the entry. */
		cxgbe_free_atid(t, ftid);
		clear_filter(f);
		t4_os_free(f);
	}

	if (ctx)
		t4_complete(&ctx->completion);
}
1234
1235 /**
1236  * Handle a LE-TCAM filter write/deletion reply.
1237  */
1238 void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1239 {
1240         struct filter_entry *f = NULL;
1241         unsigned int tid = GET_TID(rpl);
1242         int idx, max_fidx = adap->tids.nftids;
1243
1244         /* Get the corresponding filter entry for this tid */
1245         if (adap->tids.ftid_tab) {
1246                 /* Check this in normal filter region */
1247                 idx = tid - adap->tids.ftid_base;
1248                 if (idx >= max_fidx)
1249                         return;
1250
1251                 f = &adap->tids.ftid_tab[idx];
1252                 if (f->tid != tid)
1253                         return;
1254         }
1255
1256         /* We found the filter entry for this tid */
1257         if (f) {
1258                 unsigned int ret = G_COOKIE(rpl->cookie);
1259                 struct filter_ctx *ctx;
1260
1261                 /*
1262                  * Pull off any filter operation context attached to the
1263                  * filter.
1264                  */
1265                 ctx = f->ctx;
1266                 f->ctx = NULL;
1267
1268                 if (ret == FW_FILTER_WR_FLT_ADDED) {
1269                         f->pending = 0;  /* asynchronous setup completed */
1270                         f->valid = 1;
1271                         if (ctx) {
1272                                 ctx->tid = f->tid;
1273                                 ctx->result = 0;
1274                         }
1275                 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
1276                         /*
1277                          * Clear the filter when we get confirmation from the
1278                          * hardware that the filter has been deleted.
1279                          */
1280                         clear_filter(f);
1281                         if (ctx)
1282                                 ctx->result = 0;
1283                 } else {
1284                         /*
1285                          * Something went wrong.  Issue a warning about the
1286                          * problem and clear everything out.
1287                          */
1288                         dev_warn(adap, "filter %u setup failed with error %u\n",
1289                                  idx, ret);
1290                         clear_filter(f);
1291                         if (ctx)
1292                                 ctx->result = -EINVAL;
1293                 }
1294
1295                 if (ctx)
1296                         t4_complete(&ctx->completion);
1297         }
1298 }
1299
1300 /*
1301  * Retrieve the packet count for the specified filter.
1302  */
1303 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
1304                            u64 *c, int hash, bool get_byte)
1305 {
1306         struct filter_entry *f;
1307         unsigned int tcb_base, tcbaddr;
1308         int ret;
1309
1310         tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
1311         if (is_hashfilter(adapter) && hash) {
1312                 if (fidx < adapter->tids.ntids) {
1313                         f = adapter->tids.tid_tab[fidx];
1314                         if (!f)
1315                                 return -EINVAL;
1316
1317                         if (is_t5(adapter->params.chip)) {
1318                                 *c = 0;
1319                                 return 0;
1320                         }
1321                         tcbaddr = tcb_base + (fidx * TCB_SIZE);
1322                         goto get_count;
1323                 } else {
1324                         return -ERANGE;
1325                 }
1326         } else {
1327                 if (fidx >= adapter->tids.nftids)
1328                         return -ERANGE;
1329
1330                 f = &adapter->tids.ftid_tab[fidx];
1331                 if (!f->valid)
1332                         return -EINVAL;
1333
1334                 tcbaddr = tcb_base + f->tid * TCB_SIZE;
1335         }
1336
1337         f = &adapter->tids.ftid_tab[fidx];
1338         if (!f->valid)
1339                 return -EINVAL;
1340
1341 get_count:
1342         if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
1343                 /*
1344                  * For T5, the Filter Packet Hit Count is maintained as a
1345                  * 32-bit Big Endian value in the TCB field {timestamp}.
1346                  * Similar to the craziness above, instead of the filter hit
1347                  * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
1348                  * sizeof(u32)), it actually shows up at offset 24.  Whacky.
1349                  */
1350                 if (get_byte) {
1351                         unsigned int word_offset = 4;
1352                         __be64 be64_byte_count;
1353
1354                         t4_os_lock(&adapter->win0_lock);
1355                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1356                                            tcbaddr +
1357                                            (word_offset * sizeof(__be32)),
1358                                            sizeof(be64_byte_count),
1359                                            &be64_byte_count,
1360                                            T4_MEMORY_READ);
1361                         t4_os_unlock(&adapter->win0_lock);
1362                         if (ret < 0)
1363                                 return ret;
1364                         *c = be64_to_cpu(be64_byte_count);
1365                 } else {
1366                         unsigned int word_offset = 6;
1367                         __be32 be32_count;
1368
1369                         t4_os_lock(&adapter->win0_lock);
1370                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1371                                            tcbaddr +
1372                                            (word_offset * sizeof(__be32)),
1373                                            sizeof(be32_count), &be32_count,
1374                                            T4_MEMORY_READ);
1375                         t4_os_unlock(&adapter->win0_lock);
1376                         if (ret < 0)
1377                                 return ret;
1378                         *c = (u64)be32_to_cpu(be32_count);
1379                 }
1380         }
1381         return 0;
1382 }
1383
1384 /*
1385  * Clear the packet count for the specified filter.
1386  */
1387 int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
1388                              int hash, bool clear_byte)
1389 {
1390         u64 tcb_mask = 0, tcb_val = 0;
1391         struct filter_entry *f = NULL;
1392         u16 tcb_word = 0;
1393
1394         if (is_hashfilter(adapter) && hash) {
1395                 if (fidx >= adapter->tids.ntids)
1396                         return -ERANGE;
1397
1398                 /* No hitcounts supported for T5 hashfilters */
1399                 if (is_t5(adapter->params.chip))
1400                         return 0;
1401
1402                 f = adapter->tids.tid_tab[fidx];
1403         } else {
1404                 if (fidx >= adapter->tids.nftids)
1405                         return -ERANGE;
1406
1407                 f = &adapter->tids.ftid_tab[fidx];
1408         }
1409
1410         if (!f || !f->valid)
1411                 return -EINVAL;
1412
1413         tcb_word = W_TCB_TIMESTAMP;
1414         tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
1415         tcb_val = V_TCB_TIMESTAMP(0ULL);
1416
1417         set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
1418
1419         if (clear_byte) {
1420                 tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
1421                 tcb_mask =
1422                         V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
1423                         V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
1424                 tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
1425                           V_TCB_T_RTSEQ_RECENT(0ULL);
1426
1427                 set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
1428         }
1429
1430         return 0;
1431 }
1432
1433 /**
1434  * Handle a Hash filter delete reply.
1435  */
1436 void cxgbe_hash_del_filter_rpl(struct adapter *adap,
1437                                const struct cpl_abort_rpl_rss *rpl)
1438 {
1439         struct tid_info *t = &adap->tids;
1440         struct filter_entry *f;
1441         struct filter_ctx *ctx = NULL;
1442         unsigned int tid = GET_TID(rpl);
1443
1444         f = lookup_tid(t, tid);
1445         if (!f) {
1446                 dev_warn(adap, "%s: could not find filter entry: %u\n",
1447                          __func__, tid);
1448                 return;
1449         }
1450
1451         ctx = f->ctx;
1452
1453         clear_filter(f);
1454         cxgbe_remove_tid(t, 0, tid, 0);
1455         t4_os_free(f);
1456
1457         if (ctx) {
1458                 ctx->result = 0;
1459                 t4_complete(&ctx->completion);
1460         }
1461 }