net/cxgbe: support to offload flows to HASH region
[dpdk.git] / drivers / net / cxgbe / cxgbe_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5 #include <rte_net.h>
6 #include "common.h"
7 #include "t4_regs.h"
8 #include "cxgbe_filter.h"
9 #include "clip_tbl.h"
10
11 /**
12  * Initialize Hash Filters
13  */
14 int init_hash_filter(struct adapter *adap)
15 {
16         unsigned int n_user_filters;
17         unsigned int user_filter_perc;
18         int ret;
19         u32 params[7], val[7];
20
21 #define FW_PARAM_DEV(param) \
22         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
23         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
24
25 #define FW_PARAM_PFVF(param) \
26         (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
27         V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) |  \
28         V_FW_PARAMS_PARAM_Y(0) | \
29         V_FW_PARAMS_PARAM_Z(0))
30
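           /*
            * Query the firmware for the total number of TIDs available;
            * hash filters are carved out of this TID space.
            */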
31         params[0] = FW_PARAM_DEV(NTID);
32         ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
33                               params, val);
34         if (ret < 0)
35                 return ret;
36         adap->tids.ntids = val[0];
37         adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
38
39         user_filter_perc = 100;
40         n_user_filters = mult_frac(adap->tids.nftids,
41                                    user_filter_perc,
42                                    100);
43
44         adap->tids.nftids = n_user_filters;
45         adap->params.hash_filter = 1;
46         return 0;
47 }
48
49 /**
50  * Validate if the requested filter specification can be set by checking
51  * if the requested features have been enabled
52  */
53 int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
54 {
55         u32 fconf;
56
57         /*
58          * Check for unconfigured fields being used.
59          */
60         fconf = adapter->params.tp.vlan_pri_map;
61
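   /*
    * S(field): the rule actually uses this field (value or mask set).
    * U(mask, field): the field is used, but the corresponding match tuple
    * is not enabled in the TP VLAN PRI MAP configuration (fconf), so the
    * request cannot be supported.
    */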
62 #define S(_field) \
63         (fs->val._field || fs->mask._field)
64 #define U(_mask, _field) \
65         (!(fconf & (_mask)) && S(_field))
66
67         if (U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
68                 return -EOPNOTSUPP;
69
70 #undef S
71 #undef U
72         return 0;
73 }
74
75 /**
76  * Get the queue to which the traffic must be steered.
77  */
78 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
79                                       struct ch_filter_specification *fs)
80 {
81         struct port_info *pi = ethdev2pinfo(dev);
82         struct adapter *adapter = pi->adapter;
83         unsigned int iq;
84
85         /*
86          * If the user has requested steering matching Ingress Packets
87          * to a specific Queue Set, we need to make sure it's in range
88          * for the port and map that into the Absolute Queue ID of the
89          * Queue Set's Response Queue.
90          */
91         if (!fs->dirsteer) {
92                 iq = 0;
93         } else {
94                 /*
95                  * If the iq id is greater than the number of qsets,
96                  * then assume it is an absolute qid.
97                  */
98                 if (fs->iq < pi->n_rx_qsets)
99                         iq = adapter->sge.ethrxq[pi->first_qset +
100                                                  fs->iq].rspq.abs_id;
101                 else
102                         iq = fs->iq;
103         }
104
105         return iq;
106 }
107
108 /* Return an error number if the indicated filter isn't writable ... */
109 int writable_filter(struct filter_entry *f)
110 {
111         if (f->locked)
112                 return -EPERM;
113         if (f->pending)
114                 return -EBUSY;
115
116         return 0;
117 }
118
119 /**
120  * Check if the entry is already filled.
121  */
122 bool is_filter_set(struct tid_info *t, int fidx, int family)
123 {
124         bool result = FALSE;
125         int i, max;
126
127         /* An IPv6 filter requires four slots, while an IPv4 filter
128          * requires only one. Ensure there are enough slots available.
129          */
130         max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
131
132         t4_os_lock(&t->ftid_lock);
133         for (i = fidx; i <= max; i++) {
134                 if (rte_bitmap_get(t->ftid_bmap, i)) {
135                         result = TRUE;
136                         break;
137                 }
138         }
139         t4_os_unlock(&t->ftid_lock);
140         return result;
141 }
142
143 /**
144  * Allocate an available free entry
145  */
146 int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
147 {
148         struct tid_info *t = &adap->tids;
149         int pos;
150         int size = t->nftids;
151
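            /*
             * An IPv6 filter needs four consecutive free slots, while an
             * IPv4 filter needs only a single free slot.
             */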
152         t4_os_lock(&t->ftid_lock);
153         if (family == FILTER_TYPE_IPV6)
154                 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
155         else
156                 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
157         t4_os_unlock(&t->ftid_lock);
158
159         return pos < size ? pos : -1;
160 }
161
162 /**
163  * Construct hash filter ntuple.
164  */
165 static u64 hash_filter_ntuple(const struct filter_entry *f)
166 {
167         struct adapter *adap = ethdev2adap(f->dev);
168         struct tp_params *tp = &adap->params.tp;
169         u64 ntuple = 0;
170         u16 tcp_proto = IPPROTO_TCP; /* TCP Protocol Number */
171
172         if (tp->protocol_shift >= 0) {
173                 if (!f->fs.val.proto)
174                         ntuple |= (u64)tcp_proto << tp->protocol_shift;
175                 else
176                         ntuple |= (u64)f->fs.val.proto << tp->protocol_shift;
177         }
178
179         if (tp->ethertype_shift >= 0 && f->fs.mask.ethtype)
180                 ntuple |= (u64)(f->fs.val.ethtype) << tp->ethertype_shift;
181
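            /*
             * The constructed ntuple must line up exactly with the global
             * hash filter mask configured in TP; otherwise the tuple cannot
             * be used for an exact-match (hash) filter and 0 is returned.
             */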
182         if (ntuple != tp->hash_filter_mask)
183                 return 0;
184
185         return ntuple;
186 }
187
188 /**
189  * Build an ACT_OPEN_REQ6 message for setting an IPv6 hash filter.
190  */
191 static void mk_act_open_req6(struct filter_entry *f, struct rte_mbuf *mbuf,
192                              unsigned int qid_filterid, struct adapter *adap)
193 {
194         struct cpl_t6_act_open_req6 *req = NULL;
195         u64 local_lo, local_hi, peer_lo, peer_hi;
196         u32 *lip = (u32 *)f->fs.val.lip;
197         u32 *fip = (u32 *)f->fs.val.fip;
198
199         switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
200         case CHELSIO_T6:
201                 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req6 *);
202
203                 INIT_TP_WR(req, 0);
204                 break;
205         default:
206                 dev_err(adap, "%s: unsupported chip type!\n", __func__);
207                 return;
208         }
209
210         local_hi = ((u64)lip[1]) << 32 | lip[0];
211         local_lo = ((u64)lip[3]) << 32 | lip[2];
212         peer_hi = ((u64)fip[1]) << 32 | fip[0];
213         peer_lo = ((u64)fip[3]) << 32 | fip[2];
214
215         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
216                                                     qid_filterid));
217         req->local_port = cpu_to_be16(f->fs.val.lport);
218         req->peer_port = cpu_to_be16(f->fs.val.fport);
219         req->local_ip_hi = local_hi;
220         req->local_ip_lo = local_lo;
221         req->peer_ip_hi = peer_hi;
222         req->peer_ip_lo = peer_lo;
223         req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
224                                 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
225                                            << 1) |
226                                 V_ULP_MODE(ULP_MODE_NONE) |
227                                 F_TCAM_BYPASS | F_NON_OFFLOAD);
228         req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
229         req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
230                             V_RSS_QUEUE(f->fs.iq) |
231                             F_T5_OPT_2_VALID |
232                             F_RX_CHANNEL |
233                             V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
234                                          (f->fs.dirsteer << 1)));
235 }
236
237 /**
238  * Build an ACT_OPEN_REQ message for setting an IPv4 hash filter.
239  */
240 static void mk_act_open_req(struct filter_entry *f, struct rte_mbuf *mbuf,
241                             unsigned int qid_filterid, struct adapter *adap)
242 {
243         struct cpl_t6_act_open_req *req = NULL;
244
245         switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
246         case CHELSIO_T6:
247                 req = rte_pktmbuf_mtod(mbuf, struct cpl_t6_act_open_req *);
248
249                 INIT_TP_WR(req, 0);
250                 break;
251         default:
252                 dev_err(adap, "%s: unsupported chip type!\n", __func__);
253                 return;
254         }
255
256         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
257                                                     qid_filterid));
258         req->local_port = cpu_to_be16(f->fs.val.lport);
259         req->peer_port = cpu_to_be16(f->fs.val.fport);
260         req->local_ip = f->fs.val.lip[0] | f->fs.val.lip[1] << 8 |
261                         f->fs.val.lip[2] << 16 | f->fs.val.lip[3] << 24;
262         req->peer_ip = f->fs.val.fip[0] | f->fs.val.fip[1] << 8 |
263                         f->fs.val.fip[2] << 16 | f->fs.val.fip[3] << 24;
264         req->opt0 = cpu_to_be64(V_DELACK(f->fs.hitcnts) |
265                                 V_SMAC_SEL((cxgbe_port_viid(f->dev) & 0x7F)
266                                            << 1) |
267                                 V_ULP_MODE(ULP_MODE_NONE) |
268                                 F_TCAM_BYPASS | F_NON_OFFLOAD);
269         req->params = cpu_to_be64(V_FILTER_TUPLE(hash_filter_ntuple(f)));
270         req->opt2 = cpu_to_be32(F_RSS_QUEUE_VALID |
271                             V_RSS_QUEUE(f->fs.iq) |
272                             F_T5_OPT_2_VALID |
273                             F_RX_CHANNEL |
274                             V_CONG_CNTRL((f->fs.action == FILTER_DROP) |
275                                          (f->fs.dirsteer << 1)));
276 }
277
278 /**
279  * Set the specified hash filter.
280  */
281 static int cxgbe_set_hash_filter(struct rte_eth_dev *dev,
282                                  struct ch_filter_specification *fs,
283                                  struct filter_ctx *ctx)
284 {
285         struct port_info *pi = ethdev2pinfo(dev);
286         struct adapter *adapter = pi->adapter;
287         struct tid_info *t = &adapter->tids;
288         struct filter_entry *f;
289         struct rte_mbuf *mbuf;
290         struct sge_ctrl_txq *ctrlq;
291         unsigned int iq;
292         int atid, size;
293         int ret = 0;
294
295         ret = validate_filter(adapter, fs);
296         if (ret)
297                 return ret;
298
299         iq = get_filter_steerq(dev, fs);
300
301         ctrlq = &adapter->sge.ctrlq[pi->port_id];
302
303         f = t4_os_alloc(sizeof(*f));
304         if (!f)
305                 return -ENOMEM;
306
307         f->fs = *fs;
308         f->ctx = ctx;
309         f->dev = dev;
310         f->fs.iq = iq;
311
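            /*
             * Reserve an active TID (ATID) to track this request until the
             * hardware replies via hash_filter_rpl().
             */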
312         atid = cxgbe_alloc_atid(t, f);
313         if (atid < 0) {
314                 ret = -ENOMEM;
                    goto out_err;
            }
315
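            /*
             * IPv6 filters also need a CLIP entry for the local address.
             * For both address families, the CPL TID field carries the
             * firmware event queue in its upper bits and the ATID in the
             * lower 14 bits, so the reply can be matched to this request.
             */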
316         if (f->fs.type) {
317                 /* IPv6 hash filter */
318                 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
319                 if (!f->clipt)
320                         goto free_atid;
321
322                 size = sizeof(struct cpl_t6_act_open_req6);
323                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
324                 if (!mbuf) {
325                         ret = -ENOMEM;
326                         goto free_clip;
327                 }
328
329                 mbuf->data_len = size;
330                 mbuf->pkt_len = mbuf->data_len;
331
332                 mk_act_open_req6(f, mbuf,
333                                  ((adapter->sge.fw_evtq.abs_id << 14) | atid),
334                                  adapter);
335         } else {
336                 /* IPv4 hash filter */
337                 size = sizeof(struct cpl_t6_act_open_req);
338                 mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
339                 if (!mbuf) {
340                         ret = -ENOMEM;
341                         goto free_atid;
342                 }
343
344                 mbuf->data_len = size;
345                 mbuf->pkt_len = mbuf->data_len;
346
347                 mk_act_open_req(f, mbuf,
348                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
349                                 adapter);
350         }
351
352         f->pending = 1;
353         t4_mgmt_tx(ctrlq, mbuf);
354         return 0;
355
356 free_clip:
357         cxgbe_clip_release(f->dev, f->clipt);
358 free_atid:
359         cxgbe_free_atid(t, atid);
360
361 out_err:
362         t4_os_free(f);
363         return ret;
364 }
365
366 /**
367  * Clear a filter and release any of its resources that we own.  This also
368  * clears the filter's "pending" status.
369  */
370 void clear_filter(struct filter_entry *f)
371 {
372         if (f->clipt)
373                 cxgbe_clip_release(f->dev, f->clipt);
374
375         /*
376          * The zeroing of the filter rule below clears the filter valid,
377          * pending, locked flags etc. so it's all we need for
378          * this operation.
379          */
380         memset(f, 0, sizeof(*f));
381 }
382
383 /**
384  * t4_mk_filtdelwr - create a delete filter WR
385  * @ftid: the filter ID
386  * @wr: the filter work request to populate
387  * @qid: ingress queue to receive the delete notification
388  *
389  * Creates a filter work request to delete the supplied filter.  If @qid is
390  * negative the delete notification is suppressed.
391  */
392 static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
393 {
394         memset(wr, 0, sizeof(*wr));
395         wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
396         wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
397         wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
398                                     V_FW_FILTER_WR_NOREPLY(qid < 0));
399         wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
400         if (qid >= 0)
401                 wr->rx_chan_rx_rpl_iq =
402                                 cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
403 }
404
405 /**
406  * Create FW work request to delete the filter at a specified index
407  */
408 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
409 {
410         struct adapter *adapter = ethdev2adap(dev);
411         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
412         struct rte_mbuf *mbuf;
413         struct fw_filter_wr *fwr;
414         struct sge_ctrl_txq *ctrlq;
415         unsigned int port_id = ethdev2pinfo(dev)->port_id;
416
417         ctrlq = &adapter->sge.ctrlq[port_id];
418         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
419         if (!mbuf)
420                 return -ENOMEM;
421
422         mbuf->data_len = sizeof(*fwr);
423         mbuf->pkt_len = mbuf->data_len;
424
425         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
426         t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
427
428         /*
429          * Mark the filter as "pending" and ship off the Filter Work Request.
430          * When we get the Work Request Reply we'll clear the pending status.
431          */
432         f->pending = 1;
433         t4_mgmt_tx(ctrlq, mbuf);
434         return 0;
435 }
436
437 int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
438 {
439         struct adapter *adapter = ethdev2adap(dev);
440         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
441         struct rte_mbuf *mbuf;
442         struct fw_filter_wr *fwr;
443         struct sge_ctrl_txq *ctrlq;
444         unsigned int port_id = ethdev2pinfo(dev)->port_id;
445         int ret;
446
447         ctrlq = &adapter->sge.ctrlq[port_id];
448         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
449         if (!mbuf) {
450                 ret = -ENOMEM;
451                 goto out;
452         }
453
454         mbuf->data_len = sizeof(*fwr);
455         mbuf->pkt_len = mbuf->data_len;
456
457         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
458         memset(fwr, 0, sizeof(*fwr));
459
460         /*
461          * Construct the work request to set the filter.
462          */
463         fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
464         fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
465         fwr->tid_to_iq =
466                 cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
467                             V_FW_FILTER_WR_RQTYPE(f->fs.type) |
468                             V_FW_FILTER_WR_NOREPLY(0) |
469                             V_FW_FILTER_WR_IQ(f->fs.iq));
470         fwr->del_filter_to_l2tix =
471                 cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
472                             V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
473                             V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
474                             V_FW_FILTER_WR_PRIO(f->fs.prio));
475         fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
476         fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
477         fwr->smac_sel = 0;
478         fwr->rx_chan_rx_rpl_iq =
479                 cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
480                             V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
481                                                      ));
482         fwr->ptcl = f->fs.val.proto;
483         fwr->ptclm = f->fs.mask.proto;
484         rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
485         rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
486         rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
487         rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
488         fwr->lp = cpu_to_be16(f->fs.val.lport);
489         fwr->lpm = cpu_to_be16(f->fs.mask.lport);
490         fwr->fp = cpu_to_be16(f->fs.val.fport);
491         fwr->fpm = cpu_to_be16(f->fs.mask.fport);
492
493         /*
494          * Mark the filter as "pending" and ship off the Filter Work Request.
495          * When we get the Work Request Reply we'll clear the pending status.
496          */
497         f->pending = 1;
498         t4_mgmt_tx(ctrlq, mbuf);
499         return 0;
500
501 out:
502         return ret;
503 }
504
505 /**
506  * Set the corresponding entry in the bitmap. 4 slots are
507  * marked for IPv6, whereas only 1 slot is marked for IPv4.
508  */
509 static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
510 {
511         t4_os_lock(&t->ftid_lock);
512         if (rte_bitmap_get(t->ftid_bmap, fidx)) {
513                 t4_os_unlock(&t->ftid_lock);
514                 return -EBUSY;
515         }
516
517         if (family == FILTER_TYPE_IPV4) {
518                 rte_bitmap_set(t->ftid_bmap, fidx);
519         } else {
520                 rte_bitmap_set(t->ftid_bmap, fidx);
521                 rte_bitmap_set(t->ftid_bmap, fidx + 1);
522                 rte_bitmap_set(t->ftid_bmap, fidx + 2);
523                 rte_bitmap_set(t->ftid_bmap, fidx + 3);
524         }
525         t4_os_unlock(&t->ftid_lock);
526         return 0;
527 }
528
529 /**
530  * Clear the corresponding entry in the bitmap. 4 slots are
531  * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
532  */
533 static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
534 {
535         t4_os_lock(&t->ftid_lock);
536         if (family == FILTER_TYPE_IPV4) {
537                 rte_bitmap_clear(t->ftid_bmap, fidx);
538         } else {
539                 rte_bitmap_clear(t->ftid_bmap, fidx);
540                 rte_bitmap_clear(t->ftid_bmap, fidx + 1);
541                 rte_bitmap_clear(t->ftid_bmap, fidx + 2);
542                 rte_bitmap_clear(t->ftid_bmap, fidx + 3);
543         }
544         t4_os_unlock(&t->ftid_lock);
545 }
546
547 /**
548  * Check a delete filter request for validity and send it to the hardware.
549  * Return 0 on success, an error number otherwise.  We attach any provided
550  * filter operation context to the internal filter specification in order to
551  * facilitate signaling completion of the operation.
552  */
553 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
554                      struct ch_filter_specification *fs,
555                      struct filter_ctx *ctx)
556 {
557         struct port_info *pi = (struct port_info *)(dev->data->dev_private);
558         struct adapter *adapter = pi->adapter;
559         struct filter_entry *f;
560         unsigned int chip_ver;
561         int ret;
562
563         if (filter_id >= adapter->tids.nftids)
564                 return -ERANGE;
565
566         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
567
568         ret = is_filter_set(&adapter->tids, filter_id, fs->type);
569         if (!ret) {
570                 dev_warn(adapter, "%s: could not find filter entry: %u\n",
571                          __func__, filter_id);
572                 return -EINVAL;
573         }
574
575         /*
576          * Ensure the IPv6 filter id is aligned on a 2-slot boundary
577          * for T6, and on a 4-slot boundary for cards below T6.
578          */
579         if (fs->type) {
580                 if (chip_ver < CHELSIO_T6)
581                         filter_id &= ~(0x3);
582                 else
583                         filter_id &= ~(0x1);
584         }
585
586         f = &adapter->tids.ftid_tab[filter_id];
587         ret = writable_filter(f);
588         if (ret)
589                 return ret;
590
591         if (f->valid) {
592                 f->ctx = ctx;
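                    /*
                     * Release the ftid bitmap slots now; the hardware entry
                     * itself is torn down asynchronously by the delete work
                     * request issued below.
                     */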
593                 cxgbe_clear_ftid(&adapter->tids,
594                                  f->tid - adapter->tids.ftid_base,
595                                  f->fs.type ? FILTER_TYPE_IPV6 :
596                                               FILTER_TYPE_IPV4);
597                 return del_filter_wr(dev, filter_id);
598         }
599
600         /*
601          * If the caller has passed in a Completion Context then we need to
602          * mark it as a successful completion so they don't stall waiting
603          * for it.
604          */
605         if (ctx) {
606                 ctx->result = 0;
607                 t4_complete(&ctx->completion);
608         }
609
610         return 0;
611 }
612
613 /**
614  * Check a Chelsio Filter Request for validity, convert it into our internal
615  * format and send it to the hardware.  Return 0 on success, an error number
616  * otherwise.  We attach any provided filter operation context to the internal
617  * filter specification in order to facilitate signaling completion of the
618  * operation.
619  */
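   /*
    * Sketch of the intended usage from the flow layer: the caller
    * initializes the completion in a struct filter_ctx, invokes
    * cxgbe_set_filter(), waits for the completion to be signalled from
    * filter_rpl() or hash_filter_rpl(), and then inspects ctx->result
    * (and ctx->tid on success).
    */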
620 int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
621                      struct ch_filter_specification *fs,
622                      struct filter_ctx *ctx)
623 {
624         struct port_info *pi = ethdev2pinfo(dev);
625         struct adapter *adapter = pi->adapter;
626         unsigned int fidx, iq, fid_bit = 0;
627         struct filter_entry *f;
628         unsigned int chip_ver;
629         uint8_t bitoff[16] = {0};
630         int ret;
631
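            /*
             * Rules destined for the HASH (exact-match) region are flagged
             * with fs->cap; everything else is programmed into the LE-TCAM
             * below.
             */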
632         if (is_hashfilter(adapter) && fs->cap)
633                 return cxgbe_set_hash_filter(dev, fs, ctx);
634
635         if (filter_id >= adapter->tids.nftids)
636                 return -ERANGE;
637
638         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
639
640         ret = validate_filter(adapter, fs);
641         if (ret)
642                 return ret;
643
644         /*
645          * Ensure the filter id is aligned on a 4-slot boundary for IPv6
646          * maskfull filters.
647          */
648         if (fs->type)
649                 filter_id &= ~(0x3);
650
651         ret = is_filter_set(&adapter->tids, filter_id, fs->type);
652         if (ret)
653                 return -EBUSY;
654
655         iq = get_filter_steerq(dev, fs);
656
657         /*
658          * IPv6 filters occupy four slots and must be aligned on four-slot
659          * boundaries for T5. On T6, IPv6 filters occupy two slots and
660          * must be aligned on two-slot boundaries.
661          *
662          * IPv4 filters only occupy a single slot and have no alignment
663          * requirements but writing a new IPv4 filter into the middle
664          * of an existing IPv6 filter requires clearing the old IPv6
665          * filter.
666          */
667         if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
668                 /*
669                  * On T6, if our IPv4 filter isn't being written at a
670                  * multiple-of-2 index and a valid IPv6 filter occupies
671                  * the multiple-of-2 base slot, we return -EBUSY below:
672                  * that IPv6 filter must be deleted before the IPv4 one
673                  * can be set. On adapters below T6, an IPv6 filter
                     * occupies 4 entries.
674                  */
675                 if (chip_ver < CHELSIO_T6)
676                         fidx = filter_id & ~0x3;
677                 else
678                         fidx = filter_id & ~0x1;
679
680                 if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
681                         f = &adapter->tids.ftid_tab[fidx];
682                         if (f->valid)
683                                 return -EBUSY;
684                 }
685         } else { /* IPv6 */
686                 unsigned int max_filter_id;
687
688                 if (chip_ver < CHELSIO_T6) {
689                         /*
690                          * Ensure that the IPv6 filter is aligned on a
691                          * multiple of 4 boundary.
692                          */
693                         if (filter_id & 0x3)
694                                 return -EINVAL;
695
696                         max_filter_id = filter_id + 4;
697                 } else {
698                         /*
699                          * On T6, with CLIP enabled, an IPv6 filter
700                          * occupies only 2 entries.
701                          */
702                         if (filter_id & 0x1)
703                                 return -EINVAL;
704
705                         max_filter_id = filter_id + 2;
706                 }
707
708                 /*
709                  * Check all except the base overlapping IPv4 filter
710                  * slots.
711                  */
712                 for (fidx = filter_id + 1; fidx < max_filter_id; fidx++) {
713                         f = &adapter->tids.ftid_tab[fidx];
714                         if (f->valid)
715                                 return -EBUSY;
716                 }
717         }
718
719         /*
720          * Check to make sure that the provided filter index is not
721          * already in use by someone else
722          */
723         f = &adapter->tids.ftid_tab[filter_id];
724         if (f->valid)
725                 return -EBUSY;
726
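            /*
             * fidx is the absolute hardware TID (ftid_base + index), while
             * filter_id remains the index into the local ftid_tab and the
             * ftid bitmap.
             */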
727         fidx = adapter->tids.ftid_base + filter_id;
728         fid_bit = filter_id;
729         ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
730                              fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
731         if (ret)
732                 return ret;
733
734         /*
735          * Check to make sure the filter requested is writable ...
736          */
737         ret = writable_filter(f);
738         if (ret) {
739                 /* Clear the bits we have set above */
740                 cxgbe_clear_ftid(&adapter->tids, fid_bit,
741                                  fs->type ? FILTER_TYPE_IPV6 :
742                                             FILTER_TYPE_IPV4);
743                 return ret;
744         }
745
746         /*
747          * Allocate a CLIP table entry only if we have a non-zero IPv6 address
748          */
749         if (chip_ver > CHELSIO_T5 && fs->type &&
750             memcmp(fs->val.lip, bitoff, sizeof(bitoff))) {
751                 f->clipt = cxgbe_clip_alloc(f->dev, (u32 *)&f->fs.val.lip);
752                 if (!f->clipt)
753                         goto free_tid;
754         }
755
756         /*
757          * Convert the filter specification into our internal format.
758          * We copy the PF/VF specification into the Outer VLAN field
759          * here so the rest of the code -- including the interface to
760          * the firmware -- doesn't have to constantly do these checks.
761          */
762         f->fs = *fs;
763         f->fs.iq = iq;
764         f->dev = dev;
765
766         /*
767          * Attempt to set the filter.  If we don't succeed, we clear
768          * it and return the failure.
769          */
770         f->ctx = ctx;
771         f->tid = fidx; /* Save the actual tid */
772         ret = set_filter_wr(dev, filter_id);
773         if (ret) {
774                 fid_bit = f->tid - adapter->tids.ftid_base;
775                 goto free_tid;
776         }
777
778         return ret;
779
780 free_tid:
781         cxgbe_clear_ftid(&adapter->tids, fid_bit,
782                          fs->type ? FILTER_TYPE_IPV6 :
783                                     FILTER_TYPE_IPV4);
784         clear_filter(f);
785         return ret;
786 }
787
788 /**
789  * Handle a Hash filter write reply.
790  */
791 void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
792 {
793         struct tid_info *t = &adap->tids;
794         struct filter_entry *f;
795         struct filter_ctx *ctx = NULL;
796         unsigned int tid = GET_TID(rpl);
797         unsigned int ftid = G_TID_TID(G_AOPEN_ATID
798                                       (be32_to_cpu(rpl->atid_status)));
799         unsigned int status  = G_AOPEN_STATUS(be32_to_cpu(rpl->atid_status));
800
801         f = lookup_atid(t, ftid);
802         if (!f) {
803                 dev_warn(adap, "%s: could not find filter entry: %d\n",
804                          __func__, ftid);
805                 return;
806         }
807
808         ctx = f->ctx;
809         f->ctx = NULL;
810
811         switch (status) {
812         case CPL_ERR_NONE: {
813                 f->tid = tid;
814                 f->pending = 0;  /* asynchronous setup completed */
815                 f->valid = 1;
816
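                    /*
                     * The hardware has assigned a real TID: move the filter
                     * from the ATID table to the TID table and release the
                     * ATID.
                     */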
817                 cxgbe_insert_tid(t, f, f->tid, 0);
818                 cxgbe_free_atid(t, ftid);
819                 if (ctx) {
820                         ctx->tid = f->tid;
821                         ctx->result = 0;
822                 }
823                 break;
824         }
825         default:
826                 dev_warn(adap, "%s: filter creation failed with status = %u\n",
827                          __func__, status);
828
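                    /*
                     * Filter creation in the hash region failed.
                     * CPL_ERR_TCAM_FULL indicates the region is out of
                     * space and is reported as -EAGAIN, so the caller can
                     * treat it as a capacity failure rather than a bad rule.
                     */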
829                 if (ctx) {
830                         if (status == CPL_ERR_TCAM_FULL)
831                                 ctx->result = -EAGAIN;
832                         else
833                                 ctx->result = -EINVAL;
834                 }
835
836                 cxgbe_free_atid(t, ftid);
837                 t4_os_free(f);
838         }
839
840         if (ctx)
841                 t4_complete(&ctx->completion);
842 }
843
844 /**
845  * Handle a LE-TCAM filter write/deletion reply.
846  */
847 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
848 {
849         struct filter_entry *f = NULL;
850         unsigned int tid = GET_TID(rpl);
851         int idx, max_fidx = adap->tids.nftids;
852
853         /* Get the corresponding filter entry for this tid */
854         if (adap->tids.ftid_tab) {
855                 /* Check this in normal filter region */
856                 idx = tid - adap->tids.ftid_base;
857                 if (idx < 0 || idx >= max_fidx)
858                         return;
859
860                 f = &adap->tids.ftid_tab[idx];
861                 if (f->tid != tid)
862                         return;
863         }
864
865         /* We found the filter entry for this tid */
866         if (f) {
867                 unsigned int ret = G_COOKIE(rpl->cookie);
868                 struct filter_ctx *ctx;
869
870                 /*
871                  * Pull off any filter operation context attached to the
872                  * filter.
873                  */
874                 ctx = f->ctx;
875                 f->ctx = NULL;
876
877                 if (ret == FW_FILTER_WR_FLT_ADDED) {
878                         f->pending = 0;  /* asynchronous setup completed */
879                         f->valid = 1;
880                         if (ctx) {
881                                 ctx->tid = f->tid;
882                                 ctx->result = 0;
883                         }
884                 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
885                         /*
886                          * Clear the filter when we get confirmation from the
887                          * hardware that the filter has been deleted.
888                          */
889                         clear_filter(f);
890                         if (ctx)
891                                 ctx->result = 0;
892                 } else {
893                         /*
894                          * Something went wrong.  Issue a warning about the
895                          * problem and clear everything out.
896                          */
897                         dev_warn(adap, "filter %u setup failed with error %u\n",
898                                  idx, ret);
899                         clear_filter(f);
900                         if (ctx)
901                                 ctx->result = -EINVAL;
902                 }
903
904                 if (ctx)
905                         t4_complete(&ctx->completion);
906         }
907 }
908
909 /*
910  * Retrieve the packet or byte count for the specified filter.
911  */
912 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
913                            u64 *c, bool get_byte)
914 {
915         struct filter_entry *f;
916         unsigned int tcb_base, tcbaddr;
917         int ret;
918
919         tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
920         if (fidx >= adapter->tids.nftids)
921                 return -ERANGE;
922
923         f = &adapter->tids.ftid_tab[fidx];
924         if (!f->valid)
925                 return -EINVAL;
926
927         tcbaddr = tcb_base + f->tid * TCB_SIZE;
928
929         if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
930                 /*
931                  * For T5 and T6, the Filter Packet Hit Count is maintained
932                  * as a 32-bit Big Endian value in the TCB field {timestamp}.
933                  * Note that instead of the hit count showing up at offset
934                  * 20 ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it actually
935                  * shows up at offset 24.
936                  */
937                 if (get_byte) {
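                            /*
                             * The filter byte count is read as a 64-bit Big
                             * Endian value starting at TCB word 4, hence
                             * word_offset = 4.
                             */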
938                         unsigned int word_offset = 4;
939                         __be64 be64_byte_count;
940
941                         t4_os_lock(&adapter->win0_lock);
942                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
943                                            tcbaddr +
944                                            (word_offset * sizeof(__be32)),
945                                            sizeof(be64_byte_count),
946                                            &be64_byte_count,
947                                            T4_MEMORY_READ);
948                         t4_os_unlock(&adapter->win0_lock);
949                         if (ret < 0)
950                                 return ret;
951                         *c = be64_to_cpu(be64_byte_count);
952                 } else {
953                         unsigned int word_offset = 6;
954                         __be32 be32_count;
955
956                         t4_os_lock(&adapter->win0_lock);
957                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
958                                            tcbaddr +
959                                            (word_offset * sizeof(__be32)),
960                                            sizeof(be32_count), &be32_count,
961                                            T4_MEMORY_READ);
962                         t4_os_unlock(&adapter->win0_lock);
963                         if (ret < 0)
964                                 return ret;
965                         *c = (u64)be32_to_cpu(be32_count);
966                 }
967         }
968         return 0;
969 }