net/cxgbe: support to delete flows in HASH region
[dpdk.git] / drivers / net / cxgbe / cxgbe_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 #include <rte_net.h>
6 #include "common.h"
7 #include "t4_tcb.h"
8 #include "t4_regs.h"
9 #include "cxgbe_filter.h"
10 #include "clip_tbl.h"
11
12 /**
13  * Initialize Hash Filters
14  */
15 int init_hash_filter(struct adapter *adap)
16 {
17         unsigned int n_user_filters;
18         unsigned int user_filter_perc;
19         int ret;
20         u32 params[7], val[7];
21
22 #define FW_PARAM_DEV(param) \
23         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
24         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
25
26 #define FW_PARAM_PFVF(param) \
27         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
28         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) |  \
29         V_FW_PARAMS_PARAM_Y(0) | \
30         V_FW_PARAMS_PARAM_Z(0))
31
32         params[0] = FW_PARAM_DEV(NTID);
33         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
34                               params, val);
35         if (ret < 0)
36                 return ret;
37         adap->tids.ntids = val[0];
38         adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
39
40         user_filter_perc = 100;
41         n_user_filters = mult_frac(adap->tids.nftids,
42                                    user_filter_perc,
43                                    100);
44
45         adap->tids.nftids = n_user_filters;
46         adap->params.hash_filter = 1;
47         return 0;
48 }
49
50 /**
51  * Validate if the requested filter specification can be set by checking
52  * if the requested features have been enabled
53  */
54 int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
55 {
56         u32 fconf;
57
58         /*
59          * Check for unconfigured fields being used.
60          */
61         fconf = adapter->params.tp.vlan_pri_map;
62
63 #define S(_field) \
64         (fs->val._field || fs->mask._field)
65 #define U(_mask, _field) \
66         (!(fconf & (_mask)) && S(_field))
67
68         if (U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
69                 return -EOPNOTSUPP;
70
71 #undef S
72 #undef U
73         return 0;
74 }
75
76 /**
77  * Get the queue to which the traffic must be steered to.
78  */
79 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
80                                       struct ch_filter_specification *fs)
81 {
82         struct port_info *pi = ethdev2pinfo(dev);
83         struct adapter *adapter = pi->adapter;
84         unsigned int iq;
85
86         /*
87          * If the user has requested steering matching Ingress Packets
88          * to a specific Queue Set, we need to make sure it's in range
89          * for the port and map that into the Absolute Queue ID of the
90          * Queue Set's Response Queue.
91          */
92         if (!fs->dirsteer) {
93                 iq = 0;
94         } else {
95                 /*
96                  * If the iq id is greater than the number of qsets,
97                  * then assume it is an absolute qid.
98                  */
99                 if (fs->iq < pi->n_rx_qsets)
100                         iq = adapter->sge.ethrxq[pi->first_qset +
101                                                  fs->iq].rspq.abs_id;
102                 else
103                         iq = fs->iq;
104         }
105
106         return iq;
107 }
108
109 /* Return an error number if the indicated filter isn't writable ... */
110 int writable_filter(struct filter_entry *f)
111 {
112         if (f->locked)
113                 return -EPERM;
114         if (f->pending)
115                 return -EBUSY;
116
117         return 0;
118 }
119
/**
 * Build a CPL_SET_TCB_FIELD message as payload of a ULP_TX_PKT command.
 *
 * Writes, in place over @req: a ULP_TX_PKT header, a ULP_TX_SC_IMM
 * sub-command carrying the CPL_SET_TCB_FIELD itself, and a trailing
 * ULP_TX_SC_NOOP terminator.  The caller must provide a buffer large
 * enough for all three pieces.
 *
 * @f: filter entry supplying the tid whose TCB is modified
 * @req: destination buffer for the message
 * @word: TCB word to update
 * @mask: bits of @word to change
 * @val: new value for the masked bits
 * @cookie: opaque value echoed back in the reply
 * @no_reply: nonzero to suppress the firmware reply
 */
static inline void mk_set_tcb_field_ulp(struct filter_entry *f,
					struct cpl_set_tcb_field *req,
					unsigned int word,
					u64 mask, u64 val, u8 cookie,
					int no_reply)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* length is expressed in 16-byte units */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	/* immediate data length excludes the work request header */
	sc->len = cpu_to_be32(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = cpu_to_be16(V_NO_REPLY(no_reply) | V_REPLY_CHAN(0) |
				      V_QUEUENO(0));
	req->word_cookie = cpu_to_be16(V_WORD(word) | V_COOKIE(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	/* terminate the packet with a no-op sub-command */
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
147
148 /**
149  * Check if entry already filled.
150  */
151 bool is_filter_set(struct tid_info *t, int fidx, int family)
152 {
153         bool result = FALSE;
154         int i, max;
155
156         /* IPv6 requires four slots and IPv4 requires only 1 slot.
157          * Ensure, there's enough slots available.
158          */
159         max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
160
161         t4_os_lock(&t->ftid_lock);
162         for (i = fidx; i <= max; i++) {
163                 if (rte_bitmap_get(t->ftid_bmap, i)) {
164                         result = TRUE;
165                         break;
166                 }
167         }
168         t4_os_unlock(&t->ftid_lock);
169         return result;
170 }
171
172 /**
173  * Allocate a available free entry
174  */
175 int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
176 {
177         struct tid_info *t = &adap->tids;
178         int pos;
179         int size = t->nftids;
180
181         t4_os_lock(&t->ftid_lock);
182         if (family == FILTER_TYPE_IPV6)
183                 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
184         else
185                 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
186         t4_os_unlock(&t->ftid_lock);
187
188         return pos < size ? pos : -1;
189 }
190
191 /**
192  * Construct hash filter ntuple.
193  */
194 static u64 hash_filter_ntuple(const struct filter_entry *f)
195 {
196         struct adapter *adap = ethdev2adap(f->dev);
197         struct tp_params *tp = &adap->params.tp;
198         u64 ntuple = 0;
199         u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
200
201         if (tp->protocol_shift >= 0) {
202                 if (!f->fs.val.proto)
203                         ntuple |= (u64)tcp_proto << tp->protocol_shift;
204                 else
205                         ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
206         }
207
208         if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
209                 ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
210
211         if (ntuple != tp->hash_filter_mask)
212                 return 0;
213
214         return ntuple;
215 }
216
/**
 * Build a CPL_ABORT_REQ message as payload of a ULP_TX_PKT command.
 *
 * Same wire layout as mk_set_tcb_field_ulp: ULP_TX_PKT header,
 * ULP_TX_SC_IMM sub-command wrapping the CPL, then a ULP_TX_SC_NOOP
 * terminator.  CPL_ABORT_NO_RST asks the hardware to abort the
 * connection without sending a TCP RST.
 *
 * @abort_req: destination buffer for the message
 * @tid: connection/filter tid to abort
 */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* length in 16-byte units */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_req), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	/* immediate data length excludes the work request header */
	sc->len = cpu_to_be32(sizeof(*abort_req) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	abort_req->rsvd0 = cpu_to_be32(0);
	abort_req->rsvd1 = 0;
	abort_req->cmd = CPL_ABORT_NO_RST;
	/* terminate the packet with a no-op sub-command */
	sc = (struct ulptx_idata *)(abort_req + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
240
/**
 * Build a CPL_ABORT_RPL message as payload of a ULP_TX_PKT command.
 *
 * Mirror of mk_abort_req_ulp for the reply direction: ULP_TX_PKT
 * header, ULP_TX_SC_IMM wrapping the CPL_ABORT_RPL, then a
 * ULP_TX_SC_NOOP terminator.
 *
 * @abort_rpl: destination buffer for the message
 * @tid: connection/filter tid the reply refers to
 */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl,
			     unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = cpu_to_be32(V_ULPTX_CMD(ULP_TX_PKT) |
				      V_ULP_TXPKT_DEST(0));
	/* length in 16-byte units */
	txpkt->len = cpu_to_be32(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	/* immediate data length excludes the work request header */
	sc->len = cpu_to_be32(sizeof(*abort_rpl) -
			      sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = cpu_to_be32(0);
	abort_rpl->rsvd1 = 0;
	abort_rpl->cmd = CPL_ABORT_NO_RST;
	/* terminate the packet with a no-op sub-command */
	sc = (struct ulptx_idata *)(abort_rpl + 1);
	sc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
	sc->len = cpu_to_be32(0);
}
264
/**
 * Delete the specified hash filter.
 *
 * Builds a single control-queue work request that chains three CPLs:
 * a SET_TCB_FIELD redirecting the filter's RSS replies to the firmware
 * event queue, a CPL_ABORT_REQ to tear the tid down, and a
 * CPL_ABORT_RPL.  Completion is reported asynchronously via @ctx.
 *
 * @dev: ethernet device owning the filter
 * @filter_id: tid of the hash filter to delete
 * @ctx: optional completion context attached to the filter entry
 *
 * Returns 0 on success (including the no-op case where the entry is
 * not valid), -E2BIG for an out-of-range tid, -EINVAL if no entry is
 * associated with the tid, -EPERM/-EBUSY if the entry is not writable,
 * or -ENOMEM if no mbuf could be allocated.
 */
static int cxgbe_del_hash_filter(struct rte_eth_dev *dev,
				 unsigned int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = ethdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sge_ctrl_txq *ctrlq;
	unsigned int port_id = ethdev2pinfo(dev)->port_id;
	int ret;

	if (filter_id > adapter->tids.ntids)
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		dev_err(adapter, "%s: no filter entry for filter_id = %d\n",
			__func__, filter_id);
		return -EINVAL;
	}

	/* Refuse to touch locked or already-pending entries. */
	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		unsigned int wrlen;
		struct rte_mbuf *mbuf;
		struct work_request_hdr *wr;
		struct ulptx_idata *aligner;
		struct cpl_set_tcb_field *req;
		struct cpl_abort_req *abort_req;
		struct cpl_abort_rpl *abort_rpl;

		f->ctx = ctx;
		f->pending = 1;

		/* Total WR length: header + (SET_TCB_FIELD + alignment
		 * idata) + ABORT_REQ + ABORT_RPL, rounded to 16 bytes.
		 */
		wrlen = cxgbe_roundup(sizeof(*wr) +
				      (sizeof(*req) + sizeof(*aligner)) +
				      sizeof(*abort_req) + sizeof(*abort_rpl),
				      16);

		ctrlq = &adapter->sge.ctrlq[port_id];
		mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
		if (!mbuf) {
			dev_err(adapter, "%s: could not allocate skb ..\n",
				__func__);
			goto out_err;
		}

		mbuf->data_len = wrlen;
		mbuf->pkt_len = mbuf->data_len;

		/* Lay the three CPLs out back-to-back after the ULPTX
		 * work request header.
		 */
		req = rte_pktmbuf_mtod(mbuf, struct cpl_set_tcb_field *);
		INIT_ULPTX_WR(req, wrlen, 0, 0);
		wr = (struct work_request_hdr *)req;
		wr++;
		req = (struct cpl_set_tcb_field *)wr;
		/* Steer the tid's RSS replies to the fw event queue so
		 * the abort completion is seen by the driver.
		 */
		mk_set_tcb_field_ulp(f, req, W_TCB_RSS_INFO,
				V_TCB_RSS_INFO(M_TCB_RSS_INFO),
				V_TCB_RSS_INFO(adapter->sge.fw_evtq.abs_id),
				0, 1);
		aligner = (struct ulptx_idata *)(req + 1);
		abort_req = (struct cpl_abort_req *)(aligner + 1);
		mk_abort_req_ulp(abort_req, f->tid);
		abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
		mk_abort_rpl_ulp(abort_rpl, f->tid);
		t4_mgmt_tx(ctrlq, mbuf);
	}
	return 0;

out_err:
	return -ENOMEM;
}
342
/**
 * Build a ACT_OPEN_REQ6 message for setting IPv6 hash filter.
 *
 * Only T6 is supported: on any other chip revision an error is logged
 * and the mbuf contents are left untouched.
 *
 * @f: filter entry supplying the IPv6 4-tuple and action
 * @mbuf: mbuf whose data area receives the CPL message
 * @qid_filterid: opaque tid field, (fw_evtq abs id << 14) | atid
 * @adap: adapter, used for the chip revision check
 */
static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *req = NULL;
	u64 local_lo, local_hi, peer_lo, peer_hi;
	u32 *lip = (u32 *)f->fs.val.lip;
	u32 *fip = (u32 *)f->fs.val.fip;

	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T6:
		req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);

		INIT_TP_WR(req, 0);
		break;
	default:
		dev_err(adap, "%s: unsupported chip type!\n", __func__);
		return;
	}

	/* Fold the 128-bit addresses into hi/lo 64-bit halves.
	 * NOTE(review): the halves are stored without cpu_to_be64 —
	 * presumably lip/fip already hold wire byte order; confirm
	 * against the t4 CPL definitions.
	 */
	local_hi = ((u64)lip[1]) << 32 | lip[0];
	local_lo = ((u64)lip[3]) << 32 | lip[2];
	peer_hi = ((u64)fip[1]) << 32 | fip[0];
	peer_lo = ((u64)fip[3]) << 32 | fip[2];

	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
						    qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = local_hi;
	req->local_ip_lo = local_lo;
	req->peer_ip_hi = peer_hi;
	req->peer_ip_lo = peer_lo;
	/* F_TCAM_BYPASS: this is a hash-region (not TCAM) filter */
	req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
				V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
					   << 1) |
				V_ULP_MODE(ULP_MODE_NONE) |
				F_TCAM_BYPASS | F_NON_OFFLOAD);
	req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
	/* CONG_CNTRL bit0 = drop, bit1 = directed steering */
	req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
			    V_RSS_QUEUE(f->fs.iq) |
			    F_T5_OPT_2_VALID |
			    F_RX_CHANNEL |
			    V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
					 (f->fs.dirsteer << 1)));
}
391
392 /**
393  * Build a ACT_OPEN_REQ message for setting IPv4 hash filter.
394  */
395 static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
396                             unsigned int qid_filterid, struct adapter *adap)
397 {
398         struct cpl_t6_act_open_req *req = NULL;
399
400         switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
401         case CHELSIO_T6:
402                 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
403
404                 INIT_TP_WR(req, 0);
405                 break;
406         default:
407                 dev_err(adap, "%s: unsupported chip type!\n", __func__);
408                 return;
409         }
410
411         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
412                                                     qid_filterid));
413         req->local_port = cpu_to_be16(f->fs.val.lport);
414         req->peer_port = cpu_to_be16(f->fs.val.fport);
415         req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
416                         f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
417         req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
418                         f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
419         req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
420                                 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
421                                            << 1) |
422                                 V_ULP_MODE(ULP_MODE_NONE) |
423                                 F_TCAM_BYPASS | F_NON_OFFLOAD);
424         req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
425         req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
426                             V_RSS_QUEUE(f->fs.iq) |
427                             F_T5_OPT_2_VALID |
428                             F_RX_CHANNEL |
429                             V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
430                                          (f->fs.dirsteer << 1)));
431 }
432
433 /**
434  * Set the specified hash filter.
435  */
436 static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
437                                  struct ch_filter_specification *fs,
438                                  struct filter_ctx *ctx)
439 {
440         struct port_info *pi = ethdev2pinfo(dev);
441         struct adapter *adapter = pi->adapter;
442         struct tid_info *t = &adapter->tids;
443         struct filter_entry *f;
444         struct rte_mbuf *mbuf;
445         struct sge_ctrl_txq *ctrlq;
446         unsigned int iq;
447         int atid, size;
448         int ret = 0;
449
450         ret = validate_filter(adapter, fs);
451         if (ret)
452                 return ret;
453
454         iq = get_filter_steerq(dev, fs);
455
456         ctrlq = &adapter->sge.ctrlq[pi->port_id];
457
458         f = t4_os_alloc(sizeof(*f));
459         if (!f)
460                 goto out_err;
461
462         f->fs = *fs;
463         f->ctx = ctx;
464         f->dev = dev;
465         f->fs.iq = iq;
466
467         atid = cxgbe_alloc_atid(t, f);
468         if (atid < 0)
469                 goto out_err;
470
471         if (f->fs.type) {
472                 /* IPv6 hash filter */
473                 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
474                 if (!f->clipt)
475                         goto free_atid;
476
477                 size = sizeof(struct cpl_t6_act_open_req6);
478                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
479                 if (!mbuf) {
480                         ret = -ENOMEM;
481                         goto free_clip;
482                 }
483
484                 mbuf->data_len = size;
485                 mbuf->pkt_len = mbuf->data_len;
486
487                 mk_act_open_req6(f, mbuf,
488                                  ((adapter->sge.fw_evtq.abs_id << 14) | atid),
489                                  adapter);
490         } else {
491                 /* IPv4 hash filter */
492                 size = sizeof(struct cpl_t6_act_open_req);
493                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
494                 if (!mbuf) {
495                         ret = -ENOMEM;
496                         goto free_atid;
497                 }
498
499                 mbuf->data_len = size;
500                 mbuf->pkt_len = mbuf->data_len;
501
502                 mk_act_open_req(f, mbuf,
503                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
504                                 adapter);
505         }
506
507         f->pending = 1;
508         t4_mgmt_tx(ctrlq, mbuf);
509         return 0;
510
511 free_clip:
512         cxgbe_clip_release(f->dev, f->clipt);
513 free_atid:
514         cxgbe_free_atid(t, atid);
515
516 out_err:
517         t4_os_free(f);
518         return ret;
519 }
520
521 /**
522  * Clear a filter and release any of its resources that we own.  This also
523  * clears the filter's "pending" status.
524  */
525 void clear_filter(struct filter_entry *f)
526 {
527         if (f->clipt)
528                 cxgbe_clip_release(f->dev, f->clipt);
529
530         /*
531          * The zeroing of the filter rule below clears the filter valid,
532          * pending, locked flags etc. so it's all we need for
533          * this operation.
534          */
535         memset(f, 0, sizeof(*f));
536 }
537
538 /**
539  * t4_mk_filtdelwr - create a delete filter WR
540  * @ftid: the filter ID
541  * @wr: the filter work request to populate
542  * @qid: ingress queue to receive the delete notification
543  *
544  * Creates a filter work request to delete the supplied filter.  If @qid is
545  * negative the delete notification is suppressed.
546  */
547 static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
548 {
549         memset(wr, 0, sizeof(*wr));
550         wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
551         wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
552         wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
553                                     V_FW_FILTER_WR_NOREPLY(qid < 0));
554         wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
555         if (qid >= 0)
556                 wr->rx_chan_rx_rpl_iq =
557                                 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
558 }
559
560 /**
561  * Create FW work request to delete the filter at a specified index
562  */
563 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
564 {
565         struct adapter *adapter = ethdev2adap(dev);
566         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
567         struct rte_mbuf *mbuf;
568         struct fw_filter_wr *fwr;
569         struct sge_ctrl_txq *ctrlq;
570         unsigned int port_id = ethdev2pinfo(dev)->port_id;
571
572         ctrlq = &adapter->sge.ctrlq[port_id];
573         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
574         if (!mbuf)
575                 return -ENOMEM;
576
577         mbuf->data_len = sizeof(*fwr);
578         mbuf->pkt_len = mbuf->data_len;
579
580         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
581         t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
582
583         /*
584          * Mark the filter as "pending" and ship off the Filter Work Request.
585          * When we get the Work Request Reply we'll clear the pending status.
586          */
587         f->pending = 1;
588         t4_mgmt_tx(ctrlq, mbuf);
589         return 0;
590 }
591
592 int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
593 {
594         struct adapter *adapter = ethdev2adap(dev);
595         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
596         struct rte_mbuf *mbuf;
597         struct fw_filter_wr *fwr;
598         struct sge_ctrl_txq *ctrlq;
599         unsigned int port_id = ethdev2pinfo(dev)->port_id;
600         int ret;
601
602         ctrlq = &adapter->sge.ctrlq[port_id];
603         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
604         if (!mbuf) {
605                 ret = -ENOMEM;
606                 goto out;
607         }
608
609         mbuf->data_len = sizeof(*fwr);
610         mbuf->pkt_len = mbuf->data_len;
611
612         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
613         memset(fwr, 0, sizeof(*fwr));
614
615         /*
616          * Construct the work request to set the filter.
617          */
618         fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
619         fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
620         fwr->tid_to_iq =
621                 cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
622                             V_FW_FILTER_WR_RQTYPE(f->fs.type) |
623                             V_FW_FILTER_WR_NOREPLY(0) |
624                             V_FW_FILTER_WR_IQ(f->fs.iq));
625         fwr->del_filter_to_l2tix =
626                 cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
627                             V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
628                             V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
629                             V_FW_FILTER_WR_PRIO(f->fs.prio));
630         fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
631         fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
632         fwr->smac_sel = 0;
633         fwr->rx_chan_rx_rpl_iq =
634                 cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
635                             V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
636                                                      ));
637         fwr->ptcl = f->fs.val.proto;
638         fwr->ptclm = f->fs.mask.proto;
639         rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
640         rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
641         rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
642         rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
643         fwr->lp = cpu_to_be16(f->fs.val.lport);
644         fwr->lpm = cpu_to_be16(f->fs.mask.lport);
645         fwr->fp = cpu_to_be16(f->fs.val.fport);
646         fwr->fpm = cpu_to_be16(f->fs.mask.fport);
647
648         /*
649          * Mark the filter as "pending" and ship off the Filter Work Request.
650          * When we get the Work Request Reply we'll clear the pending status.
651          */
652         f->pending = 1;
653         t4_mgmt_tx(ctrlq, mbuf);
654         return 0;
655
656 out:
657         return ret;
658 }
659
660 /**
661  * Set the corresponding entry in the bitmap. 4 slots are
662  * marked for IPv6, whereas only 1 slot is marked for IPv4.
663  */
664 static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
665 {
666         t4_os_lock(&t->ftid_lock);
667         if (rte_bitmap_get(t->ftid_bmap, fidx)) {
668                 t4_os_unlock(&t->ftid_lock);
669                 return -EBUSY;
670         }
671
672         if (family == FILTER_TYPE_IPV4) {
673                 rte_bitmap_set(t->ftid_bmap, fidx);
674         } else {
675                 rte_bitmap_set(t->ftid_bmap, fidx);
676                 rte_bitmap_set(t->ftid_bmap, fidx + 1);
677                 rte_bitmap_set(t->ftid_bmap, fidx + 2);
678                 rte_bitmap_set(t->ftid_bmap, fidx + 3);
679         }
680         t4_os_unlock(&t->ftid_lock);
681         return 0;
682 }
683
684 /**
685  * Clear the corresponding entry in the bitmap. 4 slots are
686  * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
687  */
688 static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
689 {
690         t4_os_lock(&t->ftid_lock);
691         if (family == FILTER_TYPE_IPV4) {
692                 rte_bitmap_clear(t->ftid_bmap, fidx);
693         } else {
694                 rte_bitmap_clear(t->ftid_bmap, fidx);
695                 rte_bitmap_clear(t->ftid_bmap, fidx + 1);
696                 rte_bitmap_clear(t->ftid_bmap, fidx + 2);
697                 rte_bitmap_clear(t->ftid_bmap, fidx + 3);
698         }
699         t4_os_unlock(&t->ftid_lock);
700 }
701
702 /**
703  * Check a delete filter request for validity and send it to the hardware.
704  * Return 0 on success, an error number otherwise.  We attach any provided
705  * filter operation context to the internal filter specification in order to
706  * facilitate signaling completion of the operation.
707  */
708 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
709                      struct ch_filter_specification *fs,
710                      struct filter_ctx *ctx)
711 {
712         struct port_info *pi = (struct port_info *)(dev->data->dev_private);
713         struct adapter *adapter = pi->adapter;
714         struct filter_entry *f;
715         unsigned int chip_ver;
716         int ret;
717
718         if (is_hashfilter(adapter) && fs->cap)
719                 return cxgbe_del_hash_filter(dev, filter_id, ctx);
720
721         if (filter_id >= adapter->tids.nftids)
722                 return -ERANGE;
723
724         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
725
726         ret = is_filter_set(&adapter->tids, filter_id, fs->type);
727         if (!ret) {
728                 dev_warn(adap, "%s: could not find filter entry: %u\n",
729                          __func__, filter_id);
730                 return -EINVAL;
731         }
732
733         /*
734          * Ensure filter id is aligned on the 2 slot boundary for T6,
735          * and 4 slot boundary for cards below T6.
736          */
737         if (fs->type) {
738                 if (chip_ver < CHELSIO_T6)
739                         filter_id &= ~(0x3);
740                 else
741                         filter_id &= ~(0x1);
742         }
743
744         f = &adapter->tids.ftid_tab[filter_id];
745         ret = writable_filter(f);
746         if (ret)
747                 return ret;
748
749         if (f->valid) {
750                 f->ctx = ctx;
751                 cxgbe_clear_ftid(&adapter->tids,
752                                  f->tid - adapter->tids.ftid_base,
753                                  f->fs.type ? FILTER_TYPE_IPV6 :
754                                               FILTER_TYPE_IPV4);
755                 return del_filter_wr(dev, filter_id);
756         }
757
758         /*
759          * If the caller has passed in a Completion Context then we need to
760          * mark it as a successful completion so they don't stall waiting
761          * for it.
762          */
763         if (ctx) {
764                 ctx->result = 0;
765                 t4_complete(&ctx->completion);
766         }
767
768         return 0;
769 }
770
771 /**
772  * Check a Chelsio Filter Request for validity, convert it into our internal
773  * format and send it to the hardware.  Return 0 on success, an error number
774  * otherwise.  We attach any provided filter operation context to the internal
775  * filter specification in order to facilitate signaling completion of the
776  * operation.
777  */
778 int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
779                      struct ch_filter_specification *fs,
780                      struct filter_ctx *ctx)
781 {
782         struct port_info *pi = ethdev2pinfo(dev);
783         struct adapter *adapter = pi->adapter;
784         unsigned int fidx, iq, fid_bit = 0;
785         struct filter_entry *f;
786         unsigned int chip_ver;
787         uint8_t bitoff[16] = {0};
788         int ret;
789
790         if (is_hashfilter(adapter) && fs->cap)
791                 return cxgbe_set_hash_filter(dev, fs, ctx);
792
793         if (filter_id >= adapter->tids.nftids)
794                 return -ERANGE;
795
796         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
797
798         ret = validate_filter(adapter, fs);
799         if (ret)
800                 return ret;
801
802         /*
803          * Ensure filter id is aligned on the 4 slot boundary for IPv6
804          * maskfull filters.
805          */
806         if (fs->type)
807                 filter_id &= ~(0x3);
808
809         ret = is_filter_set(&adapter->tids, filter_id, fs->type);
810         if (ret)
811                 return -EBUSY;
812
813         iq = get_filter_steerq(dev, fs);
814
815         /*
816          * IPv6 filters occupy four slots and must be aligned on four-slot
817          * boundaries for T5. On T6, IPv6 filters occupy two-slots and
818          * must be aligned on two-slot boundaries.
819          *
820          * IPv4 filters only occupy a single slot and have no alignment
821          * requirements but writing a new IPv4 filter into the middle
822          * of an existing IPv6 filter requires clearing the old IPv6
823          * filter.
824          */
825         if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
826                 /*
827                  * For T6, If our IPv4 filter isn't being written to a
828                  * multiple of two filter index and there's an IPv6
829                  * filter at the multiple of 2 base slot, then we need
830                  * to delete that IPv6 filter ...
831                  * For adapters below T6, IPv6 filter occupies 4 entries.
832                  */
833                 if (chip_ver < CHELSIO_T6)
834                         fidx = filter_id & ~0x3;
835                 else
836                         fidx = filter_id & ~0x1;
837
838                 if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
839                         f = &adapter->tids.ftid_tab[fidx];
840                         if (f->valid)
841                                 return -EBUSY;
842                 }
843         } else { /* IPv6 */
844                 unsigned int max_filter_id;
845
846                 if (chip_ver < CHELSIO_T6) {
847                         /*
848                          * Ensure that the IPv6 filter is aligned on a
849                          * multiple of 4 boundary.
850                          */
851                         if (filter_id & 0x3)
852                                 return -EINVAL;
853
854                         max_filter_id = filter_id + 4;
855                 } else {
856                         /*
857                          * For T6, CLIP being enabled, IPv6 filter would occupy
858                          * 2 entries.
859                          */
860                         if (filter_id & 0x1)
861                                 return -EINVAL;
862
863                         max_filter_id = filter_id + 2;
864                 }
865
866                 /*
867                  * Check all except the base overlapping IPv4 filter
868                  * slots.
869                  */
870                 for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
871                         f = &adapter->tids.ftid_tab[fidx];
872                         if (f->valid)
873                                 return -EBUSY;
874                 }
875         }
876
877         /*
878          * Check to make sure that provided filter index is not
879          * already in use by someone else
880          */
881         f = &adapter->tids.ftid_tab[filter_id];
882         if (f->valid)
883                 return -EBUSY;
884
885         fidx = adapter->tids.ftid_base + filter_id;
886         fid_bit = filter_id;
887         ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
888                              fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
889         if (ret)
890                 return ret;
891
892         /*
893          * Check to make sure the filter requested is writable ...
894          */
895         ret = writable_filter(f);
896         if (ret) {
897                 /* Clear the bits we have set above */
898                 cxgbe_clear_ftid(&adapter->tids, fid_bit,
899                                  fs->type ? FILTER_TYPE_IPV6 :
900                                             FILTER_TYPE_IPV4);
901                 return ret;
902         }
903
904         /*
905          * Allocate a clip table entry only if we have non-zero IPv6 address
906          */
907         if (chip_ver > CHELSIO_T5 && fs->type &&
908             memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
909                 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
910                 if (!f->clipt)
911                         goto free_tid;
912         }
913
914         /*
915          * Convert the filter specification into our internal format.
916          * We copy the PF/VF specification into the Outer VLAN field
917          * here so the rest of the code -- including the interface to
918          * the firmware -- doesn't have to constantly do these checks.
919          */
920         f->fs = *fs;
921         f->fs.iq = iq;
922         f->dev = dev;
923
924         /*
925          * Attempt to set the filter.  If we don't succeed, we clear
926          * it and return the failure.
927          */
928         f->ctx = ctx;
929         f->tid = fidx; /* Save the actual tid */
930         ret = set_filter_wr(dev, filter_id);
931         if (ret) {
932                 fid_bit = f->tid - adapter->tids.ftid_base;
933                 goto free_tid;
934         }
935
936         return ret;
937
938 free_tid:
939         cxgbe_clear_ftid(&adapter->tids, fid_bit,
940                          fs->type ? FILTER_TYPE_IPV6 :
941                                     FILTER_TYPE_IPV4);
942         clear_filter(f);
943         return ret;
944 }
945
946 /**
947  * Handle a Hash filter write reply.
948  */
949 void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
950 {
951         struct tid_info *t = &adap->tids;
952         struct filter_entry *f;
953         struct filter_ctx *ctx = NULL;
954         unsigned int tid = GET_TID(rpl);
955         unsigned int ftid = G_TID_TID(G_AOPEN_ATID
956                                       (be32_to_cpu(rpl->atid_status)));
957         unsigned int status  = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
958
959         f = lookup_atid(t, ftid);
960         if (!f) {
961                 dev_warn(adap, "%s: could not find filter entry: %d\n",
962                          __func__, ftid);
963                 return;
964         }
965
966         ctx = f->ctx;
967         f->ctx = NULL;
968
969         switch (status) {
970         case CPL_ERR_NONE: {
971                 f->tid = tid;
972                 f->pending = 0;  /* asynchronous setup completed */
973                 f->valid = 1;
974
975                 cxgbe_insert_tid(t, f, f->tid, 0);
976                 cxgbe_free_atid(t, ftid);
977                 if (ctx) {
978                         ctx->tid = f->tid;
979                         ctx->result = 0;
980                 }
981                 break;
982         }
983         default:
984                 dev_warn(adap, "%s: filter creation failed with status = %u\n",
985                          __func__, status);
986
987                 if (ctx) {
988                         if (status == CPL_ERR_TCAM_FULL)
989                                 ctx->result = -EAGAIN;
990                         else
991                                 ctx->result = -EINVAL;
992                 }
993
994                 cxgbe_free_atid(t, ftid);
995                 t4_os_free(f);
996         }
997
998         if (ctx)
999                 t4_complete(&ctx->completion);
1000 }
1001
1002 /**
1003  * Handle a LE-TCAM filter write/deletion reply.
1004  */
1005 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
1006 {
1007         struct filter_entry *f = NULL;
1008         unsigned int tid = GET_TID(rpl);
1009         int idx, max_fidx = adap->tids.nftids;
1010
1011         /* Get the corresponding filter entry for this tid */
1012         if (adap->tids.ftid_tab) {
1013                 /* Check this in normal filter region */
1014                 idx = tid - adap->tids.ftid_base;
1015                 if (idx >= max_fidx)
1016                         return;
1017
1018                 f = &adap->tids.ftid_tab[idx];
1019                 if (f->tid != tid)
1020                         return;
1021         }
1022
1023         /* We found the filter entry for this tid */
1024         if (f) {
1025                 unsigned int ret = G_COOKIE(rpl->cookie);
1026                 struct filter_ctx *ctx;
1027
1028                 /*
1029                  * Pull off any filter operation context attached to the
1030                  * filter.
1031                  */
1032                 ctx = f->ctx;
1033                 f->ctx = NULL;
1034
1035                 if (ret == FW_FILTER_WR_FLT_ADDED) {
1036                         f->pending = 0;  /* asynchronous setup completed */
1037                         f->valid = 1;
1038                         if (ctx) {
1039                                 ctx->tid = f->tid;
1040                                 ctx->result = 0;
1041                         }
1042                 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
1043                         /*
1044                          * Clear the filter when we get confirmation from the
1045                          * hardware that the filter has been deleted.
1046                          */
1047                         clear_filter(f);
1048                         if (ctx)
1049                                 ctx->result = 0;
1050                 } else {
1051                         /*
1052                          * Something went wrong.  Issue a warning about the
1053                          * problem and clear everything out.
1054                          */
1055                         dev_warn(adap, "filter %u setup failed with error %u\n",
1056                                  idx, ret);
1057                         clear_filter(f);
1058                         if (ctx)
1059                                 ctx->result = -EINVAL;
1060                 }
1061
1062                 if (ctx)
1063                         t4_complete(&ctx->completion);
1064         }
1065 }
1066
1067 /*
1068  * Retrieve the packet count for the specified filter.
1069  */
1070 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
1071                            u64 *c, bool get_byte)
1072 {
1073         struct filter_entry *f;
1074         unsigned int tcb_base, tcbaddr;
1075         int ret;
1076
1077         tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
1078         if (fidx >= adapter->tids.nftids)
1079                 return -ERANGE;
1080
1081         f = &adapter->tids.ftid_tab[fidx];
1082         if (!f->valid)
1083                 return -EINVAL;
1084
1085         tcbaddr = tcb_base + f->tid * TCB_SIZE;
1086
1087         if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
1088                 /*
1089                  * For T5, the Filter Packet Hit Count is maintained as a
1090                  * 32-bit Big Endian value in the TCB field {timestamp}.
1091                  * Similar to the craziness above, instead of the filter hit
1092                  * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
1093                  * sizeof(u32)), it actually shows up at offset 24.  Whacky.
1094                  */
1095                 if (get_byte) {
1096                         unsigned int word_offset = 4;
1097                         __be64 be64_byte_count;
1098
1099                         t4_os_lock(&adapter->win0_lock);
1100                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1101                                            tcbaddr +
1102                                            (word_offset * sizeof(__be32)),
1103                                            sizeof(be64_byte_count),
1104                                            &be64_byte_count,
1105                                            T4_MEMORY_READ);
1106                         t4_os_unlock(&adapter->win0_lock);
1107                         if (ret < 0)
1108                                 return ret;
1109                         *c = be64_to_cpu(be64_byte_count);
1110                 } else {
1111                         unsigned int word_offset = 6;
1112                         __be32 be32_count;
1113
1114                         t4_os_lock(&adapter->win0_lock);
1115                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
1116                                            tcbaddr +
1117                                            (word_offset * sizeof(__be32)),
1118                                            sizeof(be32_count), &be32_count,
1119                                            T4_MEMORY_READ);
1120                         t4_os_unlock(&adapter->win0_lock);
1121                         if (ret < 0)
1122                                 return ret;
1123                         *c = (u64)be32_to_cpu(be32_count);
1124                 }
1125         }
1126         return 0;
1127 }
1128
1129 /**
1130  * Handle a Hash filter delete reply.
1131  */
1132 void hash_del_filter_rpl(struct adapter *adap,
1133                          const struct cpl_abort_rpl_rss *rpl)
1134 {
1135         struct tid_info *t = &adap->tids;
1136         struct filter_entry *f;
1137         struct filter_ctx *ctx = NULL;
1138         unsigned int tid = GET_TID(rpl);
1139
1140         f = lookup_tid(t, tid);
1141         if (!f) {
1142                 dev_warn(adap, "%s: could not find filter entry: %u\n",
1143                          __func__, tid);
1144                 return;
1145         }
1146
1147         ctx = f->ctx;
1148         f->ctx = NULL;
1149
1150         f->valid = 0;
1151
1152         if (f->clipt)
1153                 cxgbe_clip_release(f->dev, f->clipt);
1154
1155         cxgbe_remove_tid(t, 0, tid, 0);
1156         t4_os_free(f);
1157
1158         if (ctx) {
1159                 ctx->result = 0;
1160                 t4_complete(&ctx->completion);
1161         }
1162 }