net/cxgbe: use firmware API for validating filter spec
drivers/net/cxgbe/cxgbe_filter.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include <rte_net.h>

#include "base/common.h"
#include "base/t4_tcb.h"
#include "base/t4_regs.h"
#include "cxgbe_filter.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"

/**
 * Initialize Hash Filters
 */
int cxgbe_init_hash_filter(struct adapter *adap)
{
        unsigned int n_user_filters;
        unsigned int user_filter_perc;
        int ret;
        u32 params[7], val[7];

#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
        V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
        V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
        V_FW_PARAMS_PARAM_Y(0) | \
        V_FW_PARAMS_PARAM_Z(0))

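        /*
         * Firmware run-time parameters are addressed by a 32-bit
         * identifier built from a mnemonic (device-wide vs. per-PF/VF
         * scope) plus X/Y/Z sub-indices. Query the device-scoped NTID
         * parameter to learn how many hardware TIDs the firmware has
         * provisioned, then derive the active TID allotment from it.
         */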
        params[0] = FW_PARAM_DEV(NTID);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
                              params, val);
        if (ret < 0)
                return ret;
        adap->tids.ntids = val[0];
        adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

        user_filter_perc = 100;
        n_user_filters = mult_frac(adap->tids.nftids,
                                   user_filter_perc,
                                   100);

        adap->tids.nftids = n_user_filters;
        adap->params.hash_filter = 1;
        return 0;
}

/**
 * Validate if the requested filter specification can be set by checking
 * if the requested features have been enabled
 */
int cxgbe_validate_filter(struct adapter *adapter,
                          struct ch_filter_specification *fs)
{
        u32 fconf, iconf;

        /*
         * Check for unconfigured fields being used.
         */
        fconf = fs->cap ? adapter->params.tp.filter_mask :
                          adapter->params.tp.vlan_pri_map;

        iconf = adapter->params.tp.ingress_config;

#define S(_field) \
        (fs->val._field || fs->mask._field)
#define U(_mask, _field) \
        (!(fconf & (_mask)) && S(_field))
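/*
 * S(field) is true when the request matches on the given field at all;
 * U(mask, field) is true when the field is requested but its tuple slot
 * has not been enabled in the hardware configuration, i.e. the request
 * cannot be honoured.
 */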

        if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
            U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
            U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) ||
            U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld))
                return -EOPNOTSUPP;

        /* Either OVLAN or PFVF match is enabled in hardware, but not both */
        if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
            (S(ovlan_vld) && (iconf & F_VNIC)))
                return -EOPNOTSUPP;

        /* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */
        if ((S(ovlan_vld) && (iconf & F_USE_ENC_IDX)) ||
            (S(pfvf_vld) && (iconf & F_USE_ENC_IDX)))
                return -EOPNOTSUPP;

#undef S
#undef U

        /*
         * If the user is requesting that the filter action loop
         * matching packets back out one of our ports, make sure that
         * the egress port is in range.
         */
        if (fs->action == FILTER_SWITCH &&
            fs->eport >= adapter->params.nports)
                return -ERANGE;

        /*
         * Don't allow various trivially obvious bogus out-of-range
         * values ...
         */
        if (fs->val.iport >= adapter->params.nports)
                return -ERANGE;

        if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
                return -EOPNOTSUPP;

        if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
                return -EOPNOTSUPP;

        return 0;
}

/**
 * Get the queue to which the traffic must be steered.
 */
static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
                                      struct ch_filter_specification *fs)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        unsigned int iq;

        /*
         * If the user has requested steering matching Ingress Packets
         * to a specific Queue Set, we need to make sure it's in range
         * for the port and map that into the Absolute Queue ID of the
         * Queue Set's Response Queue.
         */
        if (!fs->dirsteer) {
                iq = 0;
        } else {
                /*
                 * If the iq id is not less than the number of qsets,
                 * then assume it is an absolute qid.
                 */
                if (fs->iq < pi->n_rx_qsets)
                        iq = adapter->sge.ethrxq[pi->first_qset +
                                                 fs->iq].rspq.abs_id;
                else
                        iq = fs->iq;
        }

        return iq;
}

/* Return an error number if the indicated filter isn't writable ... */
static int writable_filter(struct filter_entry *f)
{
        if (f->locked)
                return -EPERM;
        if (f->pending)
                return -EBUSY;

        return 0;
}

/**
 * Send CPL_SET_TCB_FIELD message
 */
static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
                          u16 word, u64 mask, u64 val, int no_reply)
{
        struct rte_mbuf *mbuf;
        struct cpl_set_tcb_field *req;
        struct sge_ctrl_txq *ctrlq;

        ctrlq = &adapter->sge.ctrlq[0];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        WARN_ON(!mbuf);

        mbuf->data_len = sizeof(*req);
        mbuf->pkt_len = mbuf->data_len;

        req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
        memset(req, 0, sizeof(*req));
        INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
        req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
                                      V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
                                      V_NO_REPLY(no_reply));
        req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);

        t4_mgmt_tx(ctrlq, mbuf);
}

/**
 * Set one of the t_flags bits in the TCB.
 */
static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
                          unsigned int bit_pos, unsigned int val, int no_reply)
{
        set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
                      (unsigned long long)val << bit_pos, no_reply);
}

/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
                                        struct cpl_set_tcb_field *req,
                                        unsigned int word,
                                        u64 mask, u64 val, u8 cookie,
                                        int no_reply)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
        req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
                                      V_QUEUENO(0));
        req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);
        sc = (struct ulptx_idata *)(req + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
 * IPv4 requires only 1 slot on all cards.
 */
u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
{
        if (family == FILTER_TYPE_IPV6) {
                if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
                        return 4;

                return 2;
        }

        return 1;
}

/**
 * Check if entries are already filled.
 */
bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
{
        bool result = FALSE;
        u32 i;

        /* Check whether any of the requested slots are already in use. */
        t4_os_lock(&t->ftid_lock);
        for (i = fidx; i < fidx + nentries; i++) {
                if (rte_bitmap_get(t->ftid_bmap, i)) {
                        result = TRUE;
                        break;
                }
        }
        t4_os_unlock(&t->ftid_lock);
        return result;
}

/**
 * Allocate available free entries.
 */
int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
{
        struct tid_info *t = &adap->tids;
        int pos;
        int size = t->nftids;

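        /*
         * A multi-slot (IPv6) filter needs a run of consecutive free
         * ftids (see cxgbe_filter_slots()), so such requests go through
         * the free-region search rather than a single-bit search.
         */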
        t4_os_lock(&t->ftid_lock);
        if (nentries > 1)
                pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
                                                    nentries);
        else
                pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
        t4_os_unlock(&t->ftid_lock);

        return pos < size ? pos : -1;
}

/**
 * Construct hash filter ntuple.
 */
static u64 hash_filter_ntuple(const struct filter_entry *f)
{
        struct adapter *adap = ethdev2adap(f->dev);
        struct tp_params *tp = &adap->params.tp;
        u64 ntuple = 0;
        u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */

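        /*
         * The TP block compresses the match tuple according to the
         * configured filter mask; each *_shift below is the field's bit
         * offset within the compressed tuple and is negative when the
         * field is not part of the configuration, so such fields are
         * skipped entirely.
         */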
        if (tp->port_shift >= 0 && f->fs.mask.iport)
                ntuple |= (u64)f->fs.val.iport << tp->port_shift;

        if (tp->protocol_shift >= 0) {
                if (!f->fs.val.proto)
                        ntuple |= (u64)tcp_proto << tp->protocol_shift;
                else
                        ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
        }

        if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
                ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
        if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
                ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
        if (tp->vlan_shift >= 0 && f->fs.mask.ivlan)
                ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
                          tp->vlan_shift;
        if (tp->vnic_shift >= 0) {
                if ((adap->params.tp.ingress_config & F_VNIC) &&
                    f->fs.mask.pfvf_vld)
                        ntuple |= (u64)(f->fs.val.pfvf_vld << 16 |
                                        f->fs.val.pf << 13 | f->fs.val.vf) <<
                                        tp->vnic_shift;
                else if (!(adap->params.tp.ingress_config & F_VNIC) &&
                         f->fs.mask.ovlan_vld)
                        ntuple |= (u64)(f->fs.val.ovlan_vld << 16 |
                                        f->fs.val.ovlan) << tp->vnic_shift;
        }
        if (tp->tos_shift >= 0 && f->fs.mask.tos)
                ntuple |= (u64)f->fs.val.tos << tp->tos_shift;

        return ntuple;
}

/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
                             unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*abort_req) -
                              sizeof(struct work_request_hdr));
        OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
        abort_req->rsvd0 = cpu_to_be32(0);
        abort_req->rsvd1 = 0;
        abort_req->cmd = CPL_ABORT_NO_RST;
        sc = (struct ulptx_idata *)(abort_req + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
                             unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*abort_rpl) -
                              sizeof(struct work_request_hdr));
        OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
        abort_rpl->rsvd0 = cpu_to_be32(0);
        abort_rpl->rsvd1 = 0;
        abort_rpl->cmd = CPL_ABORT_NO_RST;
        sc = (struct ulptx_idata *)(abort_rpl + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
                                 unsigned int filter_id,
                                 struct filter_ctx *ctx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;
        int ret;

        if (filter_id > adapter->tids.ntids)
                return -E2BIG;

        f = lookup_tid(t, filter_id);
        if (!f) {
                dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
                        __func__, filter_id);
                return -EINVAL;
        }

        ret = writable_filter(f);
        if (ret)
                return ret;

        if (f->valid) {
                unsigned int wrlen;
                struct rte_mbuf *mbuf;
                struct work_request_hdr *wr;
                struct ulptx_idata *aligner;
                struct cpl_set_tcb_field *req;
                struct cpl_abort_req *abort_req;
                struct cpl_abort_rpl *abort_rpl;

                f->ctx = ctx;
                f->pending = 1;
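
                /*
                 * A hash filter lives in a connection TCB, so deletion is
                 * a single ULP_TX work request carrying three back-to-back
                 * CPLs: a SET_TCB_FIELD that repoints RSS at the firmware
                 * event queue, an ABORT_REQ that tears the TID down, and
                 * an ABORT_RPL that completes the abort without a reset.
                 */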
                wrlen = cxgbe_roundup(sizeof(*wr) +
                                      (sizeof(*req) + sizeof(*aligner)) +
                                      sizeof(*abort_req) + sizeof(*abort_rpl),
                                      16);

                ctrlq = &adapter->sge.ctrlq[port_id];
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        dev_err(adapter, "%s: could not allocate mbuf\n",
                                __func__);
                        goto out_err;
                }

                mbuf->data_len = wrlen;
                mbuf->pkt_len = mbuf->data_len;

                req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
                INIT_ULPTX_WR(req, wrlen, 0, 0);
                wr = (struct work_request_hdr *)req;
                wr++;
                req = (struct cpl_set_tcb_field *)wr;
                mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
                                V_TCB_RSS_INFO(M_TCB_RSS_INFO),
                                V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
                                0, 1);
                aligner = (struct ulptx_idata *)(req + 1);
                abort_req = (struct cpl_abort_req *)(aligner + 1);
                mk_abort_req_ulp(abort_req, f->tid);
                abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
                mk_abort_rpl_ulp(abort_rpl, f->tid);
                t4_mgmt_tx(ctrlq, mbuf);
        }
        return 0;

out_err:
        return -ENOMEM;
}

/**
 * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
                             unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req6 *req = NULL;
        u64 local_lo, local_hi, peer_lo, peer_hi;
        u32 *lip = (u32 *)f->fs.val.lip;
        u32 *fip = (u32 *)f->fs.val.fip;

        switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
        case CHELSIO_T6:
                req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

                INIT_TP_WR(req, 0);
                break;
        default:
                dev_err(adap, "%s: unsupported chip type!\n", __func__);
                return;
        }

        local_hi = ((u64)lip[1]) << 32 | lip[0];
        local_lo = ((u64)lip[3]) << 32 | lip[2];
        peer_hi = ((u64)fip[1]) << 32 | fip[0];
        peer_lo = ((u64)fip[3]) << 32 | fip[2];

        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                    qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        req->local_ip_hi = local_hi;
        req->local_ip_lo = local_lo;
        req->peer_ip_hi = peer_hi;
        req->peer_ip_lo = peer_lo;
        req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                V_DELACK(f->fs.hitcnts) |
                                V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
                                V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
        req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
                            V_RSS_QUEUE(f->fs.iq) |
                            F_T5_OPT_2_VALID |
                            F_RX_CHANNEL |
                            V_SACK_EN(f->fs.swapmac) |
                            V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
                                         (f->fs.dirsteer << 1)) |
                            V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
 */
static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
                            unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req *req = NULL;

        switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
        case CHELSIO_T6:
                req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);

                INIT_TP_WR(req, 0);
                break;
        default:
                dev_err(adap, "%s: unsupported chip type!\n", __func__);
                return;
        }

        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                                    qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
                        f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
        req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
                        f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
        req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                V_DELACK(f->fs.hitcnts) |
                                V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
                                V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
        req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
                            V_RSS_QUEUE(f->fs.iq) |
                            F_T5_OPT_2_VALID |
                            F_RX_CHANNEL |
                            V_SACK_EN(f->fs.swapmac) |
                            V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
                                         (f->fs.dirsteer << 1)) |
                            V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Set the specified hash filter.
 */
static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
                                 struct ch_filter_specification *fs,
                                 struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct rte_mbuf *mbuf;
        struct sge_ctrl_txq *ctrlq;
        unsigned int iq;
        int atid, size;
        int ret = 0;

        ret = cxgbe_validate_filter(adapter, fs);
        if (ret)
                return ret;

        iq = get_filter_steerq(dev, fs);

        ctrlq = &adapter->sge.ctrlq[pi->port_id];

        f = t4_os_alloc(sizeof(*f));
        if (!f) {
                ret = -ENOMEM;
                goto out_err;
        }

        f->fs = *fs;
        f->ctx = ctx;
        f->dev = dev;
        f->fs.iq = iq;

        /*
         * If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan == VLAN_INSERT ||
            f->fs.newvlan == VLAN_REWRITE) {
                /* allocate L2T entry for new filter */
                f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
                                                   f->fs.eport, f->fs.dmac);
                if (!f->l2t) {
                        ret = -ENOMEM;
                        goto out_err;
                }
        }

        /* If the new filter requires Source MAC rewriting then we need to
         * allocate a SMT entry for the filter
         */
        if (f->fs.newsmac) {
                f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
                if (!f->smt) {
                        ret = -EAGAIN;
                        goto out_err;
                }
        }

        atid = cxgbe_alloc_atid(t, f);
        if (atid < 0) {
                ret = -ENOMEM;
                goto out_err;
        }

        if (f->fs.type == FILTER_TYPE_IPV6) {
                /* IPv6 hash filter */
                f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
                if (!f->clipt) {
                        ret = -ENOMEM;
                        goto free_atid;
                }

                size = sizeof(struct cpl_t6_act_open_req6);
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        ret = -ENOMEM;
                        goto free_clip;
                }

                mbuf->data_len = size;
                mbuf->pkt_len = mbuf->data_len;

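                /*
                 * The opaque TID carries the firmware event queue's
                 * absolute ID in its upper bits so that the ACT_OPEN_RPL
                 * for this ATID is steered back to that queue.
                 */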
                mk_act_open_req6(f, mbuf,
                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                 adapter);
        } else {
                /* IPv4 hash filter */
                size = sizeof(struct cpl_t6_act_open_req);
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        ret = -ENOMEM;
                        goto free_atid;
                }

                mbuf->data_len = size;
                mbuf->pkt_len = mbuf->data_len;

                mk_act_open_req(f, mbuf,
                                ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                adapter);
        }

        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;

free_clip:
        cxgbe_clip_release(f->dev, f->clipt);
free_atid:
        cxgbe_free_atid(t, atid);

out_err:
        t4_os_free(f);
        return ret;
}

/**
 * Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct filter_entry *f)
{
        if (f->clipt)
                cxgbe_clip_release(f->dev, f->clipt);

        /*
         * The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @adap: adapter context
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
                            struct fw_filter2_wr *wr, int qid)
{
        memset(wr, 0, sizeof(*wr));
        if (adap->params.filter2_wr_support)
                wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
        wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
                                    V_FW_FILTER_WR_NOREPLY(qid < 0));
        wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
        if (qid >= 0)
                wr->rx_chan_rx_rpl_iq =
                                cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/**
 * Create FW work request to delete the filter at a specified index
 */
static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter2_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;

        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf)
                return -ENOMEM;

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
        t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;
}

static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter2_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;
        int ret;

        /*
         * If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newvlan || f->fs.newdmac) {
                /* allocate L2T entry for new filter */
                f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
                                                   f->fs.eport, f->fs.dmac);

                if (!f->l2t)
                        return -ENOMEM;
        }

        /* If the new filter requires Source MAC rewriting then we need to
         * allocate a SMT entry for the filter
         */
        if (f->fs.newsmac) {
                f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
                if (!f->smt) {
                        if (f->l2t) {
                                cxgbe_l2t_release(f->l2t);
                                f->l2t = NULL;
                        }
                        return -ENOMEM;
                }
        }

        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf) {
                ret = -ENOMEM;
                goto out;
        }

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
        memset(fwr, 0, sizeof(*fwr));

        /*
         * Construct the work request to set the filter.
         */
        if (adapter->params.filter2_wr_support)
                fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
        fwr->tid_to_iq =
                cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
                            V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                            V_FW_FILTER_WR_NOREPLY(0) |
                            V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                            V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                            V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                            V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                            V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                            V_FW_FILTER_WR_INSVLAN
                                (f->fs.newvlan == VLAN_INSERT ||
                                 f->fs.newvlan == VLAN_REWRITE) |
                            V_FW_FILTER_WR_RMVLAN
                                (f->fs.newvlan == VLAN_REMOVE ||
                                 f->fs.newvlan == VLAN_REWRITE) |
                            V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                            V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                            V_FW_FILTER_WR_PRIO(f->fs.prio) |
                            V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
        fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->smac_sel = f->smt ? f->smt->hw_idx : 0;
        fwr->rx_chan_rx_rpl_iq =
                cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
                            V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
                                                     ));
        fwr->maci_to_matchtypem =
                cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                            V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                            V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                            V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = cpu_to_be16(f->fs.val.ivlan);
        fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan);
        fwr->ovlan = cpu_to_be16(f->fs.val.ovlan);
        fwr->ovlanm = cpu_to_be16(f->fs.mask.ovlan);
        rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = cpu_to_be16(f->fs.val.lport);
        fwr->lpm = cpu_to_be16(f->fs.mask.lport);
        fwr->fp = cpu_to_be16(f->fs.val.fport);
        fwr->fpm = cpu_to_be16(f->fs.mask.fport);

        if (adapter->params.filter2_wr_support) {
                fwr->filter_type_swapmac =
                         V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
                fwr->natmode_to_ulp_type =
                        V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
                                                 ULP_MODE_TCPDDP :
                                                 ULP_MODE_NONE) |
                        V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
                fwr->newlport = cpu_to_be16(f->fs.nat_lport);
                fwr->newfport = cpu_to_be16(f->fs.nat_fport);
        }

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;

out:
        return ret;
}

/**
 * Set the corresponding entries in the bitmap.
 */
static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
        u32 i;

        t4_os_lock(&t->ftid_lock);
        if (rte_bitmap_get(t->ftid_bmap, fidx)) {
                t4_os_unlock(&t->ftid_lock);
                return -EBUSY;
        }

        for (i = fidx; i < fidx + nentries; i++)
                rte_bitmap_set(t->ftid_bmap, i);
        t4_os_unlock(&t->ftid_lock);
        return 0;
}

/**
 * Clear the corresponding entries in the bitmap.
 */
static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
        u32 i;

        t4_os_lock(&t->ftid_lock);
        for (i = fidx; i < fidx + nentries; i++)
                rte_bitmap_clear(t->ftid_bmap, i);
        t4_os_unlock(&t->ftid_lock);
}

/**
 * Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise.  We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct filter_entry *f;
        unsigned int chip_ver;
        u8 nentries;
        int ret;

        if (is_hashfilter(adapter) && fs->cap)
                return cxgbe_del_hash_filter(dev, filter_id, ctx);

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

        /*
         * Ensure IPv6 filter id is aligned on the 2 slot boundary for T6,
         * and 4 slot boundary for cards below T6.
         */
        if (fs->type == FILTER_TYPE_IPV6) {
                if (chip_ver < CHELSIO_T6)
                        filter_id &= ~(0x3);
                else
                        filter_id &= ~(0x1);
        }

        nentries = cxgbe_filter_slots(adapter, fs->type);
        ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
        if (!ret) {
                dev_warn(adapter, "%s: could not find filter entry: %u\n",
                         __func__, filter_id);
                return -EINVAL;
        }

        f = &adapter->tids.ftid_tab[filter_id];
        ret = writable_filter(f);
        if (ret)
                return ret;

        if (f->valid) {
                f->ctx = ctx;
                cxgbe_clear_ftid(&adapter->tids,
                                 f->tid - adapter->tids.ftid_base,
                                 nentries);
                return del_filter_wr(dev, filter_id);
        }

        /*
         * If the caller has passed in a Completion Context then we need to
         * mark it as a successful completion so they don't stall waiting
         * for it.
         */
        if (ctx) {
                ctx->result = 0;
                t4_complete(&ctx->completion);
        }

        return 0;
}

/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        u8 nentries, bitoff[16] = {0};
        struct filter_entry *f;
        unsigned int chip_ver;
        unsigned int fidx, iq;
        u32 iconf;
        int ret;

        if (is_hashfilter(adapter) && fs->cap)
                return cxgbe_set_hash_filter(dev, fs, ctx);

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

        ret = cxgbe_validate_filter(adapter, fs);
        if (ret)
                return ret;

        /*
         * IPv6 filters occupy four slots and must be aligned on four-slot
         * boundaries for T5. On T6, IPv6 filters occupy two slots and
         * must be aligned on two-slot boundaries.
         *
         * IPv4 filters only occupy a single slot and have no alignment
         * requirements.
         */
        fidx = filter_id;
        if (fs->type == FILTER_TYPE_IPV6) {
                if (chip_ver < CHELSIO_T6)
                        fidx &= ~(0x3);
                else
                        fidx &= ~(0x1);
        }

        if (fidx != filter_id)
                return -EINVAL;

        nentries = cxgbe_filter_slots(adapter, fs->type);
        ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
        if (ret)
                return -EBUSY;

        iq = get_filter_steerq(dev, fs);

        /*
         * Check to make sure that provided filter index is not
         * already in use by someone else
         */
        f = &adapter->tids.ftid_tab[filter_id];
        if (f->valid)
                return -EBUSY;

        fidx = adapter->tids.ftid_base + filter_id;
        ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
        if (ret)
                return ret;

        /*
         * Check to make sure the filter requested is writable ...
         */
        ret = writable_filter(f);
        if (ret) {
                /* Clear the bits we have set above */
                cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
                return ret;
        }

        /*
         * Allocate a clip table entry only if we have non-zero IPv6 address
         */
        if (chip_ver > CHELSIO_T5 && fs->type &&
            memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
                f->clipt = cxgbe_clip_alloc(dev, (u32 *)&fs->val.lip);
                if (!f->clipt) {
                        ret = -ENOMEM;
                        goto free_tid;
                }
        }

        /*
         * Convert the filter specification into our internal format.
         * We copy the PF/VF specification into the Outer VLAN field
         * here so the rest of the code -- including the interface to
         * the firmware -- doesn't have to constantly do these checks.
         */
        f->fs = *fs;
        f->fs.iq = iq;
        f->dev = dev;

        iconf = adapter->params.tp.ingress_config;

        /* Either PFVF or OVLAN can be active, but not both
         * So, if PFVF is enabled, then overwrite the OVLAN
         * fields with PFVF fields before writing the spec
         * to hardware.
         */
        if (iconf & F_VNIC) {
                f->fs.val.ovlan = fs->val.pf << 13 | fs->val.vf;
                f->fs.mask.ovlan = fs->mask.pf << 13 | fs->mask.vf;
                f->fs.val.ovlan_vld = fs->val.pfvf_vld;
                f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
        }

        /*
         * Attempt to set the filter.  If we don't succeed, we clear
         * it and return the failure.
         */
        f->ctx = ctx;
        f->tid = fidx; /* Save the actual tid */
        ret = set_filter_wr(dev, filter_id);
        if (ret)
                goto free_tid;

        return ret;

free_tid:
        cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
        clear_filter(f);
        return ret;
}

/**
 * Handle a Hash filter write reply.
 */
void cxgbe_hash_filter_rpl(struct adapter *adap,
                           const struct cpl_act_open_rpl *rpl)
{
        struct tid_info *t = &adap->tids;
        struct filter_entry *f;
        struct filter_ctx *ctx = NULL;
        unsigned int tid = GET_TID(rpl);
        unsigned int ftid = G_TID_TID(G_AOPEN_ATID
                                      (be32_to_cpu(rpl->atid_status)));
        unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

        f = lookup_atid(t, ftid);
        if (!f) {
                dev_warn(adap, "%s: could not find filter entry: %d\n",
                         __func__, ftid);
                return;
        }

        ctx = f->ctx;
        f->ctx = NULL;

        switch (status) {
        case CPL_ERR_NONE: {
                f->tid = tid;
                f->pending = 0;  /* asynchronous setup completed */
                f->valid = 1;

                cxgbe_insert_tid(t, f, f->tid, 0);
                cxgbe_free_atid(t, ftid);
                if (ctx) {
                        ctx->tid = f->tid;
                        ctx->result = 0;
                }
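
                /*
                 * Hash filters are stored directly in connection TCBs, so
                 * the remaining filter actions are applied by rewriting
                 * TCB fields: the timestamp/RTT words double as the hit
                 * counters, and the ECE/RFR/CWR congestion-control flags
                 * are repurposed to trigger DMAC rewrite, VLAN insertion
                 * and SMAC rewrite respectively.
                 */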
                if (f->fs.hitcnts)
                        set_tcb_field(adap, tid,
                                      W_TCB_TIMESTAMP,
                                      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
                                      V_TCB_T_RTT_TS_RECENT_AGE
                                              (M_TCB_T_RTT_TS_RECENT_AGE),
                                      V_TCB_TIMESTAMP(0ULL) |
                                      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
                                      1);
                if (f->fs.newdmac)
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_ECE, 1, 1);
                if (f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE)
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
                if (f->fs.newsmac) {
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_CWR, 1, 1);
                        set_tcb_field(adap, tid, W_TCB_SMAC_SEL,
                                      V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
                                      V_TCB_SMAC_SEL(f->smt->hw_idx), 1);
                }
                break;
        }
        default:
                dev_warn(adap, "%s: filter creation failed with status = %u\n",
                         __func__, status);

                if (ctx) {
                        if (status == CPL_ERR_TCAM_FULL)
                                ctx->result = -EAGAIN;
                        else
                                ctx->result = -EINVAL;
                }

                cxgbe_free_atid(t, ftid);
                t4_os_free(f);
        }

        if (ctx)
                t4_complete(&ctx->completion);
}

/**
 * Handle a LE-TCAM filter write/deletion reply.
 */
void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        struct filter_entry *f = NULL;
        unsigned int tid = GET_TID(rpl);
        int idx, max_fidx = adap->tids.nftids;

        /* Get the corresponding filter entry for this tid */
        if (adap->tids.ftid_tab) {
                /* Check this in normal filter region */
                idx = tid - adap->tids.ftid_base;
                if (idx < 0 || idx >= max_fidx)
                        return;

                f = &adap->tids.ftid_tab[idx];
                if (f->tid != tid)
                        return;
        }

        /* We found the filter entry for this tid */
        if (f) {
                unsigned int ret = G_COOKIE(rpl->cookie);
                struct filter_ctx *ctx;

                /*
                 * Pull off any filter operation context attached to the
                 * filter.
                 */
                ctx = f->ctx;
                f->ctx = NULL;

                if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                        if (ctx) {
                                ctx->tid = f->tid;
                                ctx->result = 0;
                        }
                } else if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /*
                         * Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(f);
                        if (ctx)
                                ctx->result = 0;
                } else {
                        /*
                         * Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_warn(adap, "filter %u setup failed with error %u\n",
                                 idx, ret);
                        clear_filter(f);
                        if (ctx)
                                ctx->result = -EINVAL;
                }

                if (ctx)
                        t4_complete(&ctx->completion);
        }
}

/*
 * Retrieve the packet count for the specified filter.
 */
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
                           u64 *c, int hash, bool get_byte)
{
        struct filter_entry *f;
        unsigned int tcb_base, tcbaddr;
        int ret;

        tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
        if (is_hashfilter(adapter) && hash) {
                if (fidx < adapter->tids.ntids) {
                        f = adapter->tids.tid_tab[fidx];
                        if (!f)
                                return -EINVAL;

                        if (is_t5(adapter->params.chip)) {
                                *c = 0;
                                return 0;
                        }
                        tcbaddr = tcb_base + (fidx * TCB_SIZE);
                        goto get_count;
                } else {
                        return -ERANGE;
                }
        } else {
                if (fidx >= adapter->tids.nftids)
                        return -ERANGE;

                f = &adapter->tids.ftid_tab[fidx];
                if (!f->valid)
                        return -EINVAL;

                tcbaddr = tcb_base + f->tid * TCB_SIZE;
        }

get_count:
        if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
                /*
                 * For T5 and T6, the Filter Packet Hit Count is maintained
                 * as a 32-bit Big Endian value in the TCB field {timestamp}.
                 * Instead of the filter hit count showing up at offset 20
                 * ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually shows
                 * up at offset 24.  Whacky.
                 */
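                /*
                 * TCBs live in adapter memory (EDC0 here), so the counters
                 * are fetched with a locked memory-window read: the 64-bit
                 * byte count starts at TCB word 4 and the 32-bit packet
                 * count at TCB word 6, per the layout described above.
                 */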
                if (get_byte) {
                        unsigned int word_offset = 4;
                        __be64 be64_byte_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be64_byte_count),
                                           &be64_byte_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = be64_to_cpu(be64_byte_count);
                } else {
                        unsigned int word_offset = 6;
                        __be32 be32_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be32_count), &be32_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = (u64)be32_to_cpu(be32_count);
                }
        }
        return 0;
}

/*
 * Clear the packet count for the specified filter.
 */
int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
                             int hash, bool clear_byte)
{
        u64 tcb_mask = 0, tcb_val = 0;
        struct filter_entry *f = NULL;
        u16 tcb_word = 0;

        if (is_hashfilter(adapter) && hash) {
                if (fidx >= adapter->tids.ntids)
                        return -ERANGE;

                /* No hitcounts supported for T5 hashfilters */
                if (is_t5(adapter->params.chip))
                        return 0;

                f = adapter->tids.tid_tab[fidx];
        } else {
                if (fidx >= adapter->tids.nftids)
                        return -ERANGE;

                f = &adapter->tids.ftid_tab[fidx];
        }

        if (!f || !f->valid)
                return -EINVAL;

        tcb_word = W_TCB_TIMESTAMP;
        tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
        tcb_val = V_TCB_TIMESTAMP(0ULL);

        set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);

        if (clear_byte) {
                tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
                tcb_mask =
                        V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
                        V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
                tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
                          V_TCB_T_RTSEQ_RECENT(0ULL);

                set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
        }

        return 0;
}

/**
 * Handle a Hash filter delete reply.
 */
void cxgbe_hash_del_filter_rpl(struct adapter *adap,
                               const struct cpl_abort_rpl_rss *rpl)
{
        struct tid_info *t = &adap->tids;
        struct filter_entry *f;
        struct filter_ctx *ctx = NULL;
        unsigned int tid = GET_TID(rpl);

        f = lookup_tid(t, tid);
        if (!f) {
                dev_warn(adap, "%s: could not find filter entry: %u\n",
                         __func__, tid);
                return;
        }

        ctx = f->ctx;
        f->ctx = NULL;

        f->valid = 0;

        if (f->clipt)
                cxgbe_clip_release(f->dev, f->clipt);

        cxgbe_remove_tid(t, 0, tid, 0);
        t4_os_free(f);

        if (ctx) {
                ctx->result = 0;
                t4_complete(&ctx->completion);
        }
}