/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */
#include <rte_net.h>

#include "base/common.h"
#include "base/t4_tcb.h"
#include "base/t4_regs.h"
#include "cxgbe_filter.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"

/**
 * Initialize Hash Filters
 */
int cxgbe_init_hash_filter(struct adapter *adap)
{
        unsigned int n_user_filters;
        unsigned int user_filter_perc;
        int ret;
        u32 params[7], val[7];

#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
        V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
        V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
        V_FW_PARAMS_PARAM_Y(0) | \
        V_FW_PARAMS_PARAM_Z(0))

        params[0] = FW_PARAM_DEV(NTID);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
                              params, val);
        if (ret < 0)
                return ret;
        adap->tids.ntids = val[0];
        adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

        user_filter_perc = 100;
        n_user_filters = mult_frac(adap->tids.nftids,
                                   user_filter_perc,
                                   100);

        adap->tids.nftids = n_user_filters;
        adap->params.hash_filter = 1;
        return 0;
}
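
/*
 * Illustrative note (not part of the driver): FW_PARAM_DEV(NTID) above
 * composes the 32-bit firmware parameter id
 *
 *      V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *      V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_NTID)
 *
 * which t4_query_params() sends to the firmware; val[0] then holds the
 * number of hardware TIDs usable for hash filters.
 */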

/**
 * Validate whether the requested filter specification can be programmed,
 * by checking that all requested match fields are enabled in the current
 * filter mode.
 */
int cxgbe_validate_filter(struct adapter *adapter,
                          struct ch_filter_specification *fs)
{
        u32 fconf, iconf;

        /*
         * Check for unconfigured fields being used.
         */
        fconf = adapter->params.tp.vlan_pri_map;

        iconf = adapter->params.tp.ingress_config;

#define S(_field) \
        (fs->val._field || fs->mask._field)
#define U(_mask, _field) \
        (!(fconf & (_mask)) && S(_field))

        if (U(F_PORT, iport) || U(F_ETHERTYPE, ethtype) ||
            U(F_PROTOCOL, proto) || U(F_MACMATCH, macidx) ||
            U(F_VLAN, ivlan_vld) || U(F_VNIC_ID, ovlan_vld) ||
            U(F_TOS, tos) || U(F_VNIC_ID, pfvf_vld))
                return -EOPNOTSUPP;

        /* Either OVLAN or PFVF match is enabled in hardware, but not both */
        if ((S(pfvf_vld) && !(iconf & F_VNIC)) ||
            (S(ovlan_vld) && (iconf & F_VNIC)))
                return -EOPNOTSUPP;

        /* To use OVLAN or PFVF, L4 encapsulation match must not be enabled */
        if ((S(ovlan_vld) && (iconf & F_USE_ENC_IDX)) ||
            (S(pfvf_vld) && (iconf & F_USE_ENC_IDX)))
                return -EOPNOTSUPP;

#undef S
#undef U

        /*
         * If the user is requesting that the filter action loop
         * matching packets back out one of our ports, make sure that
         * the egress port is in range.
         */
        if (fs->action == FILTER_SWITCH &&
            fs->eport >= adapter->params.nports)
                return -ERANGE;

        /*
         * Don't allow various trivially obvious bogus out-of-range
         * values ...
         */
        if (fs->val.iport >= adapter->params.nports)
                return -ERANGE;

        if (!fs->cap && fs->nat_mode && !adapter->params.filter2_wr_support)
                return -EOPNOTSUPP;

        if (!fs->cap && fs->swapmac && !adapter->params.filter2_wr_support)
                return -EOPNOTSUPP;

        return 0;
}
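
/*
 * Usage sketch (illustrative only): a specification matching on the inner
 * VLAN tag is rejected with -EOPNOTSUPP unless F_VLAN is part of the
 * configured filter mode (vlan_pri_map):
 *
 *      struct ch_filter_specification fs = { 0 };
 *
 *      fs.val.ivlan_vld = 1;
 *      fs.mask.ivlan_vld = 1;
 *      ret = cxgbe_validate_filter(adapter, &fs);
 */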

/**
 * Get the queue to which matching traffic must be steered.
 */
static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
                                      struct ch_filter_specification *fs)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        unsigned int iq;

        /*
         * If the user has requested steering matching Ingress Packets
         * to a specific Queue Set, we need to make sure it's in range
         * for the port and map that into the Absolute Queue ID of the
         * Queue Set's Response Queue.
         */
        if (!fs->dirsteer) {
                iq = 0;
        } else {
                /*
                 * If the iq id is not less than the number of qsets,
                 * then assume it is an absolute qid.
                 */
                if (fs->iq < pi->n_rx_qsets)
                        iq = adapter->sge.ethrxq[pi->first_qset +
                                                 fs->iq].rspq.abs_id;
                else
                        iq = fs->iq;
        }

        return iq;
}
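
/*
 * Illustrative sketch (not part of the driver): steering matches to the
 * third Rx queue set of a port uses a relative index, which the function
 * above maps to the absolute response queue id:
 *
 *      fs.dirsteer = 1;
 *      fs.iq = 2;      (relative qset index, mapped to rspq.abs_id above)
 */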

/* Return an error number if the indicated filter isn't writable ... */
static int writable_filter(struct filter_entry *f)
{
        if (f->locked)
                return -EPERM;
        if (f->pending)
                return -EBUSY;

        return 0;
}

/**
 * Send CPL_SET_TCB_FIELD message
 */
static void set_tcb_field(struct adapter *adapter, unsigned int ftid,
                          u16 word, u64 mask, u64 val, int no_reply)
{
        struct rte_mbuf *mbuf;
        struct cpl_set_tcb_field *req;
        struct sge_ctrl_txq *ctrlq;

        ctrlq = &adapter->sge.ctrlq[0];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        WARN_ON(!mbuf);
        if (!mbuf)
                return; /* avoid NULL dereference if allocation fails */

        mbuf->data_len = sizeof(*req);
        mbuf->pkt_len = mbuf->data_len;

        req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
        memset(req, 0, sizeof(*req));
        INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, ftid);
        req->reply_ctrl = cpu_to_be16(V_REPLY_CHAN(0) |
                                      V_QUEUENO(adapter->sge.fw_evtq.abs_id) |
                                      V_NO_REPLY(no_reply));
        req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(ftid));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);

        t4_mgmt_tx(ctrlq, mbuf);
}
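
/*
 * Illustrative note (not part of the driver): CPL_SET_TCB_FIELD performs a
 * masked read-modify-write of one 64-bit TCB word. For example, zeroing
 * the hit counter kept in the {timestamp} word, as done later in this file:
 *
 *      set_tcb_field(adap, tid, W_TCB_TIMESTAMP,
 *                    V_TCB_TIMESTAMP(M_TCB_TIMESTAMP),
 *                    V_TCB_TIMESTAMP(0ULL), 1);
 */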

/**
 * Set one of the t_flags bits in the TCB.
 */
static void set_tcb_tflag(struct adapter *adap, unsigned int ftid,
                          unsigned int bit_pos, unsigned int val, int no_reply)
{
        set_tcb_field(adap, ftid, W_TCB_T_FLAGS, 1ULL << bit_pos,
                      (unsigned long long)val << bit_pos, no_reply);
}

/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
                                        struct cpl_set_tcb_field *req,
                                        unsigned int word,
                                        u64 mask, u64 val, u8 cookie,
                                        int no_reply)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
        req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
                                      V_QUEUENO(0));
        req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
        req->mask = cpu_to_be64(mask);
        req->val = cpu_to_be64(val);
        sc = (struct ulptx_idata *)(req + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}
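
/*
 * Layout sketch (assumed from the pointer arithmetic above): the leading
 * ulp_txpkt and ulptx_idata headers reuse the CPL's work request header
 * space, so the resulting payload is
 *
 *      ulp_txpkt | ulptx_idata (IMM) | CPL_SET_TCB_FIELD | ulptx_idata (NOOP)
 *
 * which is why sc->len above excludes sizeof(struct work_request_hdr).
 */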

/**
 * IPv6 requires 2 slots on T6 and 4 slots for cards below T6.
 * IPv4 requires only 1 slot on all cards.
 */
u8 cxgbe_filter_slots(struct adapter *adap, u8 family)
{
        if (family == FILTER_TYPE_IPV6) {
                if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
                        return 4;

                return 2;
        }

        return 1;
}
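
/*
 * Worked example (illustrative): on a T5 card an IPv6 filter consumes
 * four consecutive LE-TCAM slots, so cxgbe_filter_slots() returns 4 and
 * the filter index must be a multiple of 4; on T6 it returns 2 and the
 * index must be even. The "filter_id &= ~0x3 / ~0x1" masking further
 * below enforces exactly this alignment.
 */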

/**
 * Check if entries are already filled.
 */
bool cxgbe_is_filter_set(struct tid_info *t, u32 fidx, u8 nentries)
{
        bool result = FALSE;
        u32 i;

        /* Check whether any of the requested slots is already in use. */
        t4_os_lock(&t->ftid_lock);
        for (i = fidx; i < fidx + nentries; i++) {
                if (rte_bitmap_get(t->ftid_bmap, i)) {
                        result = TRUE;
                        break;
                }
        }
        t4_os_unlock(&t->ftid_lock);
        return result;
}

/**
 * Find a run of 'nentries' consecutive free entries in the filter bitmap.
 */
int cxgbe_alloc_ftid(struct adapter *adap, u8 nentries)
{
        struct tid_info *t = &adap->tids;
        int pos;
        int size = t->nftids;

        t4_os_lock(&t->ftid_lock);
        if (nentries > 1)
                pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size,
                                                    nentries);
        else
                pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
        t4_os_unlock(&t->ftid_lock);

        return pos < size ? pos : -1;
}

/**
 * Construct hash filter ntuple.
 */
static u64 hash_filter_ntuple(const struct filter_entry *f)
{
        struct adapter *adap = ethdev2adap(f->dev);
        struct tp_params *tp = &adap->params.tp;
        u64 ntuple = 0;
        u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */

        if (tp->port_shift >= 0 && f->fs.mask.iport)
                ntuple |= (u64)f->fs.val.iport << tp->port_shift;

        if (tp->protocol_shift >= 0) {
                if (!f->fs.val.proto)
                        ntuple |= (u64)tcp_proto << tp->protocol_shift;
                else
                        ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
        }

        if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
                ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
        if (tp->macmatch_shift >= 0 && f->fs.mask.macidx)
                ntuple |= (u64)(f->fs.val.macidx) << tp->macmatch_shift;
        if (tp->vlan_shift >= 0 && f->fs.mask.ivlan)
                ntuple |= (u64)(F_FT_VLAN_VLD | f->fs.val.ivlan) <<
                          tp->vlan_shift;
        if (tp->vnic_shift >= 0) {
                if ((adap->params.tp.ingress_config & F_VNIC) &&
                    f->fs.mask.pfvf_vld)
                        ntuple |= (u64)(f->fs.val.pfvf_vld << 16 |
                                        f->fs.val.pf << 13 | f->fs.val.vf) <<
                                        tp->vnic_shift;
                else if (!(adap->params.tp.ingress_config & F_VNIC) &&
                         f->fs.mask.ovlan_vld)
                        ntuple |= (u64)(f->fs.val.ovlan_vld << 16 |
                                        f->fs.val.ovlan) << tp->vnic_shift;
        }
        if (tp->tos_shift >= 0 && f->fs.mask.tos)
                ntuple |= (u64)f->fs.val.tos << tp->tos_shift;

        return ntuple;
}
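
/*
 * Worked example (illustrative; the shifts depend on the configured
 * filter mode): with a hypothetical protocol_shift of 8 and port_shift
 * of 16, a TCP filter on ingress port 1 composes as
 *
 *      ntuple = ((u64)1 << 16) | ((u64)IPPROTO_TCP << 8);
 *
 * Fields without a mask contribute nothing, and an unspecified protocol
 * defaults to TCP, as handled above.
 */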

/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
                             unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*abort_req) -
                              sizeof(struct work_request_hdr));
        OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
        abort_req->rsvd0 = cpu_to_be32(0);
        abort_req->rsvd1 = 0;
        abort_req->cmd = CPL_ABORT_NO_RST;
        sc = (struct ulptx_idata *)(abort_req + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
                             unsigned int tid)
{
        struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
        struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

        txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
                                      V_ULP_TXPKT_DEST(0));
        txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
        sc->len = cpu_to_be32(sizeof(*abort_rpl) -
                              sizeof(struct work_request_hdr));
        OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
        abort_rpl->rsvd0 = cpu_to_be32(0);
        abort_rpl->rsvd1 = 0;
        abort_rpl->cmd = CPL_ABORT_NO_RST;
        sc = (struct ulptx_idata *)(abort_rpl + 1);
        sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
        sc->len = cpu_to_be32(0);
}

/**
 * Delete the specified hash filter.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
                                 unsigned int filter_id,
                                 struct filter_ctx *ctx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;
        int ret;

        if (filter_id > adapter->tids.ntids)
                return -E2BIG;

        f = lookup_tid(t, filter_id);
        if (!f) {
                dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
                        __func__, filter_id);
                return -EINVAL;
        }

        ret = writable_filter(f);
        if (ret)
                return ret;

        if (f->valid) {
                unsigned int wrlen;
                struct rte_mbuf *mbuf;
                struct work_request_hdr *wr;
                struct ulptx_idata *aligner;
                struct cpl_set_tcb_field *req;
                struct cpl_abort_req *abort_req;
                struct cpl_abort_rpl *abort_rpl;

                f->ctx = ctx;
                f->pending = 1;

                wrlen = cxgbe_roundup(sizeof(*wr) +
                                      (sizeof(*req) + sizeof(*aligner)) +
                                      sizeof(*abort_req) + sizeof(*abort_rpl),
                                      16);

                ctrlq = &adapter->sge.ctrlq[port_id];
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        dev_err(adapter, "%s: could not allocate mbuf ..\n",
                                __func__);
                        goto out_err;
                }

                mbuf->data_len = wrlen;
                mbuf->pkt_len = mbuf->data_len;

                req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
                INIT_ULPTX_WR(req, wrlen, 0, 0);
                wr = (struct work_request_hdr *)req;
                wr++;
                req = (struct cpl_set_tcb_field *)wr;
                mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
                                V_TCB_RSS_INFO(M_TCB_RSS_INFO),
                                V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
                                0, 1);
                aligner = (struct ulptx_idata *)(req + 1);
                abort_req = (struct cpl_abort_req *)(aligner + 1);
                mk_abort_req_ulp(abort_req, f->tid);
                abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
                mk_abort_rpl_ulp(abort_rpl, f->tid);
                t4_mgmt_tx(ctrlq, mbuf);
        }
        return 0;

out_err:
        return -ENOMEM;
}
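
/*
 * Layout note (assumed from the construction above): the delete request
 * ships three ULP-wrapped CPLs in a single work request:
 *
 *      ULPTX_WR | SET_TCB_FIELD (redirect replies to fw_evtq) |
 *      ABORT_REQ | ABORT_RPL
 *
 * The ABORT pair tears down the hash-filter TCB; the resulting
 * CPL_ABORT_RPL_RSS is handled by cxgbe_hash_del_filter_rpl() below.
 */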

/**
 * Build an ACT_OPEN_REQ6 message for setting IPv6 hash filter.
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
                             unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req6 *req = NULL;
        u64 local_lo, local_hi, peer_lo, peer_hi;
        u32 *lip = (u32 *)f->fs.val.lip;
        u32 *fip = (u32 *)f->fs.val.fip;

        switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
        case CHELSIO_T6:
                req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

                INIT_TP_WR(req, 0);
                break;
        default:
                dev_err(adap, "%s: unsupported chip type!\n", __func__);
                return;
        }

        local_hi = ((u64)lip[1]) << 32 | lip[0];
        local_lo = ((u64)lip[3]) << 32 | lip[2];
        peer_hi = ((u64)fip[1]) << 32 | fip[0];
        peer_lo = ((u64)fip[3]) << 32 | fip[2];

        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                                                    qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        req->local_ip_hi = local_hi;
        req->local_ip_lo = local_lo;
        req->peer_ip_hi = peer_hi;
        req->peer_ip_lo = peer_lo;
        req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                V_DELACK(f->fs.hitcnts) |
                                V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
                                V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
        req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
                            V_RSS_QUEUE(f->fs.iq) |
                            F_T5_OPT_2_VALID |
                            F_RX_CHANNEL |
                            V_SACK_EN(f->fs.swapmac) |
                            V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
                                         (f->fs.dirsteer << 1)) |
                            V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Build an ACT_OPEN_REQ message for setting IPv4 hash filter.
 */
static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
                            unsigned int qid_filterid, struct adapter *adap)
{
        struct cpl_t6_act_open_req *req = NULL;

        switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
        case CHELSIO_T6:
                req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);

                INIT_TP_WR(req, 0);
                break;
        default:
                dev_err(adap, "%s: unsupported chip type!\n", __func__);
                return;
        }

        OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                                                    qid_filterid));
        req->local_port = cpu_to_be16(f->fs.val.lport);
        req->peer_port = cpu_to_be16(f->fs.val.fport);
        req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
                        f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
        req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
                        f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
        req->opt0 = cpu_to_be64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
                                        f->fs.newvlan == VLAN_REWRITE) |
                                V_DELACK(f->fs.hitcnts) |
                                V_L2T_IDX(f->l2t ? f->l2t->idx : 0) |
                                V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
                                           << 1) |
                                V_TX_CHAN(f->fs.eport) |
                                V_ULP_MODE(ULP_MODE_NONE) |
                                F_TCAM_BYPASS | F_NON_OFFLOAD);
        req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
        req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
                            V_RSS_QUEUE(f->fs.iq) |
                            F_T5_OPT_2_VALID |
                            F_RX_CHANNEL |
                            V_SACK_EN(f->fs.swapmac) |
                            V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
                                         (f->fs.dirsteer << 1)) |
                            V_CCTRL_ECN(f->fs.action == FILTER_SWITCH));
}

/**
 * Set the specified hash filter.
 */
static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
                                 struct ch_filter_specification *fs,
                                 struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct rte_mbuf *mbuf;
        struct sge_ctrl_txq *ctrlq;
        unsigned int iq;
        int atid, size;
        int ret = 0;

        ret = cxgbe_validate_filter(adapter, fs);
        if (ret)
                return ret;

        iq = get_filter_steerq(dev, fs);

        ctrlq = &adapter->sge.ctrlq[pi->port_id];

        f = t4_os_alloc(sizeof(*f));
        if (!f) {
                ret = -ENOMEM;
                goto out_err;
        }

        f->fs = *fs;
        f->ctx = ctx;
        f->dev = dev;
        f->fs.iq = iq;

        /*
         * If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newdmac || f->fs.newvlan == VLAN_INSERT ||
            f->fs.newvlan == VLAN_REWRITE) {
                /* allocate L2T entry for new filter */
                f->l2t = cxgbe_l2t_alloc_switching(dev, f->fs.vlan,
                                                   f->fs.eport, f->fs.dmac);
                if (!f->l2t) {
                        ret = -ENOMEM;
                        goto out_err;
                }
        }

        /* If the new filter requires Source MAC rewriting then we need to
         * allocate a SMT entry for the filter
         */
        if (f->fs.newsmac) {
                f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
                if (!f->smt) {
                        ret = -EAGAIN;
                        goto out_err;
                }
        }

        atid = cxgbe_alloc_atid(t, f);
        if (atid < 0) {
                ret = -EAGAIN;  /* out of active TIDs */
                goto out_err;
        }

        if (f->fs.type == FILTER_TYPE_IPV6) {
                /* IPv6 hash filter */
                f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
                if (!f->clipt) {
                        ret = -ENOMEM;  /* no CLIP entry available */
                        goto free_atid;
                }

                size = sizeof(struct cpl_t6_act_open_req6);
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        ret = -ENOMEM;
                        goto free_clip;
                }

                mbuf->data_len = size;
                mbuf->pkt_len = mbuf->data_len;

                mk_act_open_req6(f, mbuf,
                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                 adapter);
        } else {
                /* IPv4 hash filter */
                size = sizeof(struct cpl_t6_act_open_req);
                mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
                if (!mbuf) {
                        ret = -ENOMEM;
                        goto free_atid;
                }

                mbuf->data_len = size;
                mbuf->pkt_len = mbuf->data_len;

                mk_act_open_req(f, mbuf,
                                ((adapter->sge.fw_evtq.abs_id << 14) | atid),
                                adapter);
        }

        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;

free_clip:
        cxgbe_clip_release(f->dev, f->clipt);
free_atid:
        cxgbe_free_atid(t, atid);

out_err:
        t4_os_free(f);
        return ret;
}
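
/*
 * Usage sketch (illustrative only): hash ("cap") filters are requested
 * through cxgbe_set_filter() below by setting fs.cap; the filter_id
 * argument is ignored for them, since the hardware picks the TID:
 *
 *      fs.cap = 1;                     (use the hash region, not LE-TCAM)
 *      fs.type = FILTER_TYPE_IPV4;
 *      fs.action = FILTER_DROP;
 *      ret = cxgbe_set_filter(dev, 0, &fs, &ctx);
 */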

/**
 * Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct filter_entry *f)
{
        if (f->clipt)
                cxgbe_clip_release(f->dev, f->clipt);

        /* Release any switching (L2T/SMT) entries held by the filter,
         * so they are not leaked when the entry is zeroed below.
         */
        if (f->l2t)
                cxgbe_l2t_release(f->l2t);

        if (f->smt)
                cxgbe_smt_release(f->smt);

        /*
         * The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @adap: adapter context
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
static void t4_mk_filtdelwr(struct adapter *adap, unsigned int ftid,
                            struct fw_filter2_wr *wr, int qid)
{
        memset(wr, 0, sizeof(*wr));
        if (adap->params.filter2_wr_support)
                wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
        wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
                                    V_FW_FILTER_WR_NOREPLY(qid < 0));
        wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
        if (qid >= 0)
                wr->rx_chan_rx_rpl_iq =
                                cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/**
 * Create FW work request to delete the filter at a specified index
 */
static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter2_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;

        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf)
                return -ENOMEM;

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
        t4_mk_filtdelwr(adapter, f->tid, fwr, adapter->sge.fw_evtq.abs_id);

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;
}

static int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter2_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;
        int ret;

        /*
         * If the new filter requires loopback Destination MAC and/or VLAN
         * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
         * the filter.
         */
        if (f->fs.newvlan || f->fs.newdmac) {
                /* allocate L2T entry for new filter */
                f->l2t = cxgbe_l2t_alloc_switching(f->dev, f->fs.vlan,
                                                   f->fs.eport, f->fs.dmac);

                if (!f->l2t)
                        return -ENOMEM;
        }

        /* If the new filter requires Source MAC rewriting then we need to
         * allocate a SMT entry for the filter
         */
        if (f->fs.newsmac) {
                f->smt = cxgbe_smt_alloc_switching(f->dev, f->fs.smac);
                if (!f->smt) {
                        if (f->l2t) {
                                cxgbe_l2t_release(f->l2t);
                                f->l2t = NULL;
                        }
                        return -ENOMEM;
                }
        }

        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf) {
                ret = -ENOMEM;
                goto out;
        }

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter2_wr *);
        memset(fwr, 0, sizeof(*fwr));

        /*
         * Construct the work request to set the filter.
         */
        if (adapter->params.filter2_wr_support)
                fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER2_WR));
        else
                fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
        fwr->tid_to_iq =
                cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
                            V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                            V_FW_FILTER_WR_NOREPLY(0) |
                            V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                            V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                            V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
                            V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
                            V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
                            V_FW_FILTER_WR_INSVLAN
                                (f->fs.newvlan == VLAN_INSERT ||
                                 f->fs.newvlan == VLAN_REWRITE) |
                            V_FW_FILTER_WR_RMVLAN
                                (f->fs.newvlan == VLAN_REMOVE ||
                                 f->fs.newvlan == VLAN_REWRITE) |
                            V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                            V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
                            V_FW_FILTER_WR_PRIO(f->fs.prio) |
                            V_FW_FILTER_WR_L2TIX(f->l2t ? f->l2t->idx : 0));
        fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
        fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
        fwr->frag_to_ovlan_vldm =
                (V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.ivlan_vld) |
                 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.ivlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLD(f->fs.val.ovlan_vld) |
                 V_FW_FILTER_WR_OVLAN_VLDM(f->fs.mask.ovlan_vld));
        fwr->smac_sel = f->smt ? f->smt->hw_idx : 0;
        fwr->rx_chan_rx_rpl_iq =
                cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
                            V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
                                                     ));
        fwr->maci_to_matchtypem =
                cpu_to_be32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
                            V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
                            V_FW_FILTER_WR_PORT(f->fs.val.iport) |
                            V_FW_FILTER_WR_PORTM(f->fs.mask.iport));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        fwr->ttyp = f->fs.val.tos;
        fwr->ttypm = f->fs.mask.tos;
        fwr->ivlan = cpu_to_be16(f->fs.val.ivlan);
        fwr->ivlanm = cpu_to_be16(f->fs.mask.ivlan);
        fwr->ovlan = cpu_to_be16(f->fs.val.ovlan);
        fwr->ovlanm = cpu_to_be16(f->fs.mask.ovlan);
        rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = cpu_to_be16(f->fs.val.lport);
        fwr->lpm = cpu_to_be16(f->fs.mask.lport);
        fwr->fp = cpu_to_be16(f->fs.val.fport);
        fwr->fpm = cpu_to_be16(f->fs.mask.fport);

        if (adapter->params.filter2_wr_support) {
                fwr->filter_type_swapmac =
                         V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
                fwr->natmode_to_ulp_type =
                        V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
                                                 ULP_MODE_TCPDDP :
                                                 ULP_MODE_NONE) |
                        V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
                memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
                memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
                fwr->newlport = cpu_to_be16(f->fs.nat_lport);
                fwr->newfport = cpu_to_be16(f->fs.nat_fport);
        }

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;

out:
        return ret;
}

/**
 * Set the corresponding entries in the bitmap.
 */
static int cxgbe_set_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
        u32 i;

        t4_os_lock(&t->ftid_lock);
        if (rte_bitmap_get(t->ftid_bmap, fidx)) {
                t4_os_unlock(&t->ftid_lock);
                return -EBUSY;
        }

        for (i = fidx; i < fidx + nentries; i++)
                rte_bitmap_set(t->ftid_bmap, i);
        t4_os_unlock(&t->ftid_lock);
        return 0;
}

/**
 * Clear the corresponding entries in the bitmap.
 */
static void cxgbe_clear_ftid(struct tid_info *t, u32 fidx, u8 nentries)
{
        u32 i;

        t4_os_lock(&t->ftid_lock);
        for (i = fidx; i < fidx + nentries; i++)
                rte_bitmap_clear(t->ftid_bmap, i);
        t4_os_unlock(&t->ftid_lock);
}

/**
 * Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise.  We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct filter_entry *f;
        unsigned int chip_ver;
        u8 nentries;
        int ret;

        if (is_hashfilter(adapter) && fs->cap)
                return cxgbe_del_hash_filter(dev, filter_id, ctx);

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

        /*
         * Ensure IPv6 filter id is aligned on the 2 slot boundary for T6,
         * and 4 slot boundary for cards below T6.
         */
        if (fs->type == FILTER_TYPE_IPV6) {
                if (chip_ver < CHELSIO_T6)
                        filter_id &= ~(0x3);
                else
                        filter_id &= ~(0x1);
        }

        nentries = cxgbe_filter_slots(adapter, fs->type);
        ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
        if (!ret) {
                dev_warn(adapter, "%s: could not find filter entry: %u\n",
                         __func__, filter_id);
                return -EINVAL;
        }

        f = &adapter->tids.ftid_tab[filter_id];
        ret = writable_filter(f);
        if (ret)
                return ret;

        if (f->valid) {
                f->ctx = ctx;
                cxgbe_clear_ftid(&adapter->tids,
                                 f->tid - adapter->tids.ftid_base,
                                 nentries);
                return del_filter_wr(dev, filter_id);
        }

        /*
         * If the caller has passed in a Completion Context then we need to
         * mark it as a successful completion so they don't stall waiting
         * for it.
         */
        if (ctx) {
                ctx->result = 0;
                t4_complete(&ctx->completion);
        }

        return 0;
}
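
/*
 * Worked example (derived from the masking above): on a card below T6,
 * a request to delete the IPv6 filter with id 6 actually operates on
 * id 4, since 6 & ~0x3 == 4 and that filter occupies slots 4-7.
 */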

/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        u8 nentries, bitoff[16] = {0};
        struct filter_entry *f;
        unsigned int chip_ver;
        unsigned int fidx, iq;
        u32 iconf;
        int ret;

        if (is_hashfilter(adapter) && fs->cap)
                return cxgbe_set_hash_filter(dev, fs, ctx);

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);

        ret = cxgbe_validate_filter(adapter, fs);
        if (ret)
                return ret;

        /*
         * IPv6 filters occupy four slots and must be aligned on four-slot
         * boundaries for T5. On T6, IPv6 filters occupy two slots and
         * must be aligned on two-slot boundaries.
         *
         * IPv4 filters only occupy a single slot and have no alignment
         * requirements.
         */
        fidx = filter_id;
        if (fs->type == FILTER_TYPE_IPV6) {
                if (chip_ver < CHELSIO_T6)
                        fidx &= ~(0x3);
                else
                        fidx &= ~(0x1);
        }

        if (fidx != filter_id)
                return -EINVAL;

        nentries = cxgbe_filter_slots(adapter, fs->type);
        ret = cxgbe_is_filter_set(&adapter->tids, filter_id, nentries);
        if (ret)
                return -EBUSY;

        iq = get_filter_steerq(dev, fs);

        /*
         * Check to make sure that provided filter index is not
         * already in use by someone else
         */
        f = &adapter->tids.ftid_tab[filter_id];
        if (f->valid)
                return -EBUSY;

        fidx = adapter->tids.ftid_base + filter_id;
        ret = cxgbe_set_ftid(&adapter->tids, filter_id, nentries);
        if (ret)
                return ret;

        /*
         * Check to make sure the filter requested is writable ...
         */
        ret = writable_filter(f);
        if (ret) {
                /* Clear the bits we have set above */
                cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
                return ret;
        }

        /*
         * Allocate a clip table entry only if we have non-zero IPv6 address
         */
        if (chip_ver > CHELSIO_T5 && fs->type &&
            memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
                f->clipt = cxgbe_clip_alloc(dev, (u32 *)&fs->val.lip);
                if (!f->clipt) {
                        ret = -ENOMEM;  /* no CLIP entry available */
                        goto free_tid;
                }
        }

        /*
         * Convert the filter specification into our internal format.
         * We copy the PF/VF specification into the Outer VLAN field
         * here so the rest of the code -- including the interface to
         * the firmware -- doesn't have to constantly do these checks.
         */
        f->fs = *fs;
        f->fs.iq = iq;
        f->dev = dev;

        iconf = adapter->params.tp.ingress_config;

        /* Either PFVF or OVLAN can be active, but not both
         * So, if PFVF is enabled, then overwrite the OVLAN
         * fields with PFVF fields before writing the spec
         * to hardware.
         */
        if (iconf & F_VNIC) {
                f->fs.val.ovlan = fs->val.pf << 13 | fs->val.vf;
                f->fs.mask.ovlan = fs->mask.pf << 13 | fs->mask.vf;
                f->fs.val.ovlan_vld = fs->val.pfvf_vld;
                f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
        }

        /*
         * Attempt to set the filter.  If we don't succeed, we clear
         * it and return the failure.
         */
        f->ctx = ctx;
        f->tid = fidx; /* Save the actual tid */
        ret = set_filter_wr(dev, filter_id);
        if (ret)
                goto free_tid;

        return ret;

free_tid:
        cxgbe_clear_ftid(&adapter->tids, filter_id, nentries);
        clear_filter(f);
        return ret;
}
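
/*
 * Usage sketch (illustrative only; t4_init_completion() and the polling
 * helper are assumed from the companion flow code, not this file):
 * callers pass a filter_ctx which the reply handlers below complete:
 *
 *      struct filter_ctx ctx;
 *
 *      t4_init_completion(&ctx.completion);
 *      ret = cxgbe_set_filter(dev, filter_id, &fs, &ctx);
 *      (on success, wait on ctx.completion; ctx.result holds the outcome)
 */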

/**
 * Handle a Hash filter write reply.
 */
void cxgbe_hash_filter_rpl(struct adapter *adap,
                           const struct cpl_act_open_rpl *rpl)
{
        struct tid_info *t = &adap->tids;
        struct filter_entry *f;
        struct filter_ctx *ctx = NULL;
        unsigned int tid = GET_TID(rpl);
        unsigned int ftid = G_TID_TID(G_AOPEN_ATID
                                      (be32_to_cpu(rpl->atid_status)));
        unsigned int status = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));

        f = lookup_atid(t, ftid);
        if (!f) {
                dev_warn(adap, "%s: could not find filter entry: %d\n",
                         __func__, ftid);
                return;
        }

        ctx = f->ctx;
        f->ctx = NULL;

        switch (status) {
        case CPL_ERR_NONE: {
                f->tid = tid;
                f->pending = 0;  /* asynchronous setup completed */
                f->valid = 1;

                cxgbe_insert_tid(t, f, f->tid, 0);
                cxgbe_free_atid(t, ftid);
                if (ctx) {
                        ctx->tid = f->tid;
                        ctx->result = 0;
                }
                if (f->fs.hitcnts)
                        set_tcb_field(adap, tid,
                                      W_TCB_TIMESTAMP,
                                      V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
                                      V_TCB_T_RTT_TS_RECENT_AGE
                                              (M_TCB_T_RTT_TS_RECENT_AGE),
                                      V_TCB_TIMESTAMP(0ULL) |
                                      V_TCB_T_RTT_TS_RECENT_AGE(0ULL),
                                      1);
                if (f->fs.newdmac)
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_ECE, 1, 1);
                if (f->fs.newvlan == VLAN_INSERT ||
                    f->fs.newvlan == VLAN_REWRITE)
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_RFR, 1, 1);
                if (f->fs.newsmac) {
                        set_tcb_tflag(adap, tid, S_TF_CCTRL_CWR, 1, 1);
                        set_tcb_field(adap, tid, W_TCB_SMAC_SEL,
                                      V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
                                      V_TCB_SMAC_SEL(f->smt->hw_idx), 1);
                }
                break;
        }
        default:
                dev_warn(adap, "%s: filter creation failed with status = %u\n",
                         __func__, status);

                if (ctx) {
                        if (status == CPL_ERR_TCAM_FULL)
                                ctx->result = -EAGAIN;
                        else
                                ctx->result = -EINVAL;
                }

                cxgbe_free_atid(t, ftid);
                t4_os_free(f);
        }

        if (ctx)
                t4_complete(&ctx->completion);
}

/**
 * Handle a LE-TCAM filter write/deletion reply.
 */
void cxgbe_filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        struct filter_entry *f = NULL;
        unsigned int tid = GET_TID(rpl);
        int idx, max_fidx = adap->tids.nftids;

        /* Get the corresponding filter entry for this tid */
        if (adap->tids.ftid_tab) {
                /* Check this in normal filter region */
                idx = tid - adap->tids.ftid_base;
                if (idx >= max_fidx)
                        return;

                f = &adap->tids.ftid_tab[idx];
                if (f->tid != tid)
                        return;
        }

        /* We found the filter entry for this tid */
        if (f) {
                unsigned int ret = G_COOKIE(rpl->cookie);
                struct filter_ctx *ctx;

                /*
                 * Pull off any filter operation context attached to the
                 * filter.
                 */
                ctx = f->ctx;
                f->ctx = NULL;

                if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                        if (ctx) {
                                ctx->tid = f->tid;
                                ctx->result = 0;
                        }
                } else if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /*
                         * Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(f);
                        if (ctx)
                                ctx->result = 0;
                } else {
                        /*
                         * Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_warn(adap, "filter %u setup failed with error %u\n",
                                 idx, ret);
                        clear_filter(f);
                        if (ctx)
                                ctx->result = -EINVAL;
                }

                if (ctx)
                        t4_complete(&ctx->completion);
        }
}

/*
 * Retrieve the packet or byte count for the specified filter.
 */
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
                           u64 *c, int hash, bool get_byte)
{
        struct filter_entry *f;
        unsigned int tcb_base, tcbaddr;
        int ret;

        tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
        if (is_hashfilter(adapter) && hash) {
                if (fidx < adapter->tids.ntids) {
                        f = adapter->tids.tid_tab[fidx];
                        if (!f)
                                return -EINVAL;

                        if (is_t5(adapter->params.chip)) {
                                *c = 0;
                                return 0;
                        }
                        tcbaddr = tcb_base + (fidx * TCB_SIZE);
                        goto get_count;
                } else {
                        return -ERANGE;
                }
        } else {
                if (fidx >= adapter->tids.nftids)
                        return -ERANGE;

                f = &adapter->tids.ftid_tab[fidx];
                if (!f->valid)
                        return -EINVAL;

                tcbaddr = tcb_base + f->tid * TCB_SIZE;
        }

get_count:
        if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
                /*
                 * For T5, the Filter Packet Hit Count is maintained as a
                 * 32-bit Big Endian value in the TCB field {timestamp}.
                 * Instead of the filter hit count showing up at offset 20
                 * ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually
                 * shows up at offset 24.  Whacky.
                 */
                if (get_byte) {
                        unsigned int word_offset = 4;
                        __be64 be64_byte_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be64_byte_count),
                                           &be64_byte_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = be64_to_cpu(be64_byte_count);
                } else {
                        unsigned int word_offset = 6;
                        __be32 be32_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be32_count), &be32_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = (u64)be32_to_cpu(be32_count);
                }
        }
        return 0;
}
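
/*
 * Usage sketch (illustrative only): reading the 64-bit byte counter of
 * a valid LE-TCAM filter at index fidx:
 *
 *      u64 bytes;
 *
 *      ret = cxgbe_get_filter_count(adapter, fidx, &bytes, 0, true);
 */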

/*
 * Clear the packet and byte counts for the specified filter.
 */
int cxgbe_clear_filter_count(struct adapter *adapter, unsigned int fidx,
                             int hash, bool clear_byte)
{
        u64 tcb_mask = 0, tcb_val = 0;
        struct filter_entry *f = NULL;
        u16 tcb_word = 0;

        if (is_hashfilter(adapter) && hash) {
                if (fidx >= adapter->tids.ntids)
                        return -ERANGE;

                /* No hitcounts supported for T5 hashfilters */
                if (is_t5(adapter->params.chip))
                        return 0;

                f = adapter->tids.tid_tab[fidx];
        } else {
                if (fidx >= adapter->tids.nftids)
                        return -ERANGE;

                f = &adapter->tids.ftid_tab[fidx];
        }

        if (!f || !f->valid)
                return -EINVAL;

        tcb_word = W_TCB_TIMESTAMP;
        tcb_mask = V_TCB_TIMESTAMP(M_TCB_TIMESTAMP);
        tcb_val = V_TCB_TIMESTAMP(0ULL);

        set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);

        if (clear_byte) {
                tcb_word = W_TCB_T_RTT_TS_RECENT_AGE;
                tcb_mask =
                        V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE) |
                        V_TCB_T_RTSEQ_RECENT(M_TCB_T_RTSEQ_RECENT);
                tcb_val = V_TCB_T_RTT_TS_RECENT_AGE(0ULL) |
                          V_TCB_T_RTSEQ_RECENT(0ULL);

                set_tcb_field(adapter, f->tid, tcb_word, tcb_mask, tcb_val, 1);
        }

        return 0;
}

/**
 * Handle a Hash filter delete reply.
 */
void cxgbe_hash_del_filter_rpl(struct adapter *adap,
                               const struct cpl_abort_rpl_rss *rpl)
{
        struct tid_info *t = &adap->tids;
        struct filter_entry *f;
        struct filter_ctx *ctx = NULL;
        unsigned int tid = GET_TID(rpl);

        f = lookup_tid(t, tid);
        if (!f) {
                dev_warn(adap, "%s: could not find filter entry: %u\n",
                         __func__, tid);
                return;
        }

        ctx = f->ctx;
        f->ctx = NULL;

        f->valid = 0;

        if (f->clipt)
                cxgbe_clip_release(f->dev, f->clipt);

        cxgbe_remove_tid(t, 0, tid, 0);
        t4_os_free(f);

        if (ctx) {
                ctx->result = 0;
                t4_complete(&ctx->completion);
        }
}