/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Chelsio Communications.
 * All rights reserved.
 */

#include "common.h"
#include "t4_regs.h"
#include "cxgbe_filter.h"

/**
 * Initialize Hash Filters
 */
int init_hash_filter(struct adapter *adap)
{
        unsigned int n_user_filters;
        unsigned int user_filter_perc;
        int ret;
        u32 params[7], val[7];

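/*
 * Helper macros to compose the 32-bit firmware parameter identifiers
 * (mnemonic plus parameter index) queried below via t4_query_params().
 */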
#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
        V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
        V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) |  \
        V_FW_PARAMS_PARAM_Y(0) | \
        V_FW_PARAMS_PARAM_Z(0))

        params[0] = FW_PARAM_DEV(NTID);
        ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
                              params, val);
        if (ret < 0)
                return ret;
        adap->tids.ntids = val[0];
        adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);

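        /*
         * All filter TIDs are handed to user filters here (100%);
         * mult_frac(x, n, d) scales x by n/d while avoiding overflow of
         * the intermediate product.
         */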
        user_filter_perc = 100;
        n_user_filters = mult_frac(adap->tids.nftids,
                                   user_filter_perc,
                                   100);

        adap->tids.nftids = n_user_filters;
        adap->params.hash_filter = 1;
        return 0;
}

/**
 * Validate if the requested filter specification can be set by checking
 * if the requested features have been enabled
 */
int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
{
        u32 fconf;

        /*
         * Check for unconfigured fields being used.
         */
        fconf = adapter->params.tp.vlan_pri_map;

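/*
 * S(field) is true when the field is specified in either the match value
 * or the mask.  U(mask, field) is true when the field is used but the
 * corresponding match capability is not enabled in the compressed filter
 * tuple (fconf).
 */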
#define S(_field) \
        (fs->val._field || fs->mask._field)
#define U(_mask, _field) \
        (!(fconf & (_mask)) && S(_field))

        if (U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
                return -EOPNOTSUPP;

#undef S
#undef U
        return 0;
}

/**
 * Get the queue to which the traffic must be steered.
 */
static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
                                      struct ch_filter_specification *fs)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        unsigned int iq;

        /*
         * If the user has requested steering matching Ingress Packets
         * to a specific Queue Set, we need to make sure it's in range
         * for the port and map that into the Absolute Queue ID of the
         * Queue Set's Response Queue.
         */
        if (!fs->dirsteer) {
                iq = 0;
        } else {
                /*
                 * If the iq id is greater than or equal to the number of
                 * qsets, then assume it is an absolute qid.
                 */
                if (fs->iq < pi->n_rx_qsets)
                        iq = adapter->sge.ethrxq[pi->first_qset +
                                                 fs->iq].rspq.abs_id;
                else
                        iq = fs->iq;
        }

        return iq;
}

/* Return an error number if the indicated filter isn't writable ... */
int writable_filter(struct filter_entry *f)
{
        if (f->locked)
                return -EPERM;
        if (f->pending)
                return -EBUSY;

        return 0;
}

/**
 * Check if an entry is already filled.
 */
bool is_filter_set(struct tid_info *t, int fidx, int family)
{
        bool result = FALSE;
        int i, max;

        /* IPv6 requires four slots and IPv4 requires only 1 slot.
         * Ensure there are enough slots available.
         */
        max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;

        t4_os_lock(&t->ftid_lock);
        for (i = fidx; i <= max; i++) {
                if (rte_bitmap_get(t->ftid_bmap, i)) {
                        result = TRUE;
                        break;
                }
        }
        t4_os_unlock(&t->ftid_lock);
        return result;
}

/**
 * Allocate an available free entry.
 */
int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
{
        struct tid_info *t = &adap->tids;
        int pos;
        int size = t->nftids;

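        /*
         * IPv6 filters need a region of four consecutive, 4-aligned slots
         * in the bitmap; IPv4 filters need only a single free slot.
         */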
        t4_os_lock(&t->ftid_lock);
        if (family == FILTER_TYPE_IPV6)
                pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
        else
                pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
        t4_os_unlock(&t->ftid_lock);

        return pos < size ? pos : -1;
}

/**
 * Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
void clear_filter(struct filter_entry *f)
{
        /*
         * The zeroing of the filter rule below clears the filter valid,
         * pending, locked flags etc. so it's all we need for
         * this operation.
         */
        memset(f, 0, sizeof(*f));
}

/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
        memset(wr, 0, sizeof(*wr));
        wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
        wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
                                    V_FW_FILTER_WR_NOREPLY(qid < 0));
        wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
        if (qid >= 0)
                wr->rx_chan_rx_rpl_iq =
                                cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}

/**
 * Create FW work request to delete the filter at a specified index
 */
static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;

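        /* Filter work requests are sent on the port's control queue. */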
        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf)
                return -ENOMEM;

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
        t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;
}

int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
{
        struct adapter *adapter = ethdev2adap(dev);
        struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
        struct rte_mbuf *mbuf;
        struct fw_filter_wr *fwr;
        struct sge_ctrl_txq *ctrlq;
        unsigned int port_id = ethdev2pinfo(dev)->port_id;
        int ret;

        ctrlq = &adapter->sge.ctrlq[port_id];
        mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
        if (!mbuf) {
                ret = -ENOMEM;
                goto out;
        }

        mbuf->data_len = sizeof(*fwr);
        mbuf->pkt_len = mbuf->data_len;

        fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
        memset(fwr, 0, sizeof(*fwr));

        /*
         * Construct the work request to set the filter.
         */
        fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
        fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
        fwr->tid_to_iq =
                cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
                            V_FW_FILTER_WR_RQTYPE(f->fs.type) |
                            V_FW_FILTER_WR_NOREPLY(0) |
                            V_FW_FILTER_WR_IQ(f->fs.iq));
        fwr->del_filter_to_l2tix =
                cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
                            V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
                            V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
                            V_FW_FILTER_WR_PRIO(f->fs.prio));
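        /*
         * Populate the value/mask pairs for the match fields handled here:
         * ethertype, IP protocol, local/foreign IP addresses and
         * local/foreign L4 ports.  Replies for this work request are
         * steered to the firmware event queue.
         */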
        fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
        fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
        fwr->smac_sel = 0;
        fwr->rx_chan_rx_rpl_iq =
                cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
                            V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
                                                     ));
        fwr->ptcl = f->fs.val.proto;
        fwr->ptclm = f->fs.mask.proto;
        rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
        rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
        rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
        rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
        fwr->lp = cpu_to_be16(f->fs.val.lport);
        fwr->lpm = cpu_to_be16(f->fs.mask.lport);
        fwr->fp = cpu_to_be16(f->fs.val.fport);
        fwr->fpm = cpu_to_be16(f->fs.mask.fport);

        /*
         * Mark the filter as "pending" and ship off the Filter Work Request.
         * When we get the Work Request Reply we'll clear the pending status.
         */
        f->pending = 1;
        t4_mgmt_tx(ctrlq, mbuf);
        return 0;

out:
        return ret;
}

/**
 * Set the corresponding entry in the bitmap. 4 slots are
 * marked for IPv6, whereas only 1 slot is marked for IPv4.
 */
static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
{
        t4_os_lock(&t->ftid_lock);
        if (rte_bitmap_get(t->ftid_bmap, fidx)) {
                t4_os_unlock(&t->ftid_lock);
                return -EBUSY;
        }

        if (family == FILTER_TYPE_IPV4) {
                rte_bitmap_set(t->ftid_bmap, fidx);
        } else {
                rte_bitmap_set(t->ftid_bmap, fidx);
                rte_bitmap_set(t->ftid_bmap, fidx + 1);
                rte_bitmap_set(t->ftid_bmap, fidx + 2);
                rte_bitmap_set(t->ftid_bmap, fidx + 3);
        }
        t4_os_unlock(&t->ftid_lock);
        return 0;
}

/**
 * Clear the corresponding entry in the bitmap. 4 slots are
 * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
 */
static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
{
        t4_os_lock(&t->ftid_lock);
        if (family == FILTER_TYPE_IPV4) {
                rte_bitmap_clear(t->ftid_bmap, fidx);
        } else {
                rte_bitmap_clear(t->ftid_bmap, fidx);
                rte_bitmap_clear(t->ftid_bmap, fidx + 1);
                rte_bitmap_clear(t->ftid_bmap, fidx + 2);
                rte_bitmap_clear(t->ftid_bmap, fidx + 3);
        }
        t4_os_unlock(&t->ftid_lock);
}

/**
 * Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise.  We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = (struct port_info *)(dev->data->dev_private);
        struct adapter *adapter = pi->adapter;
        struct filter_entry *f;
        int ret;

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        ret = is_filter_set(&adapter->tids, filter_id, fs->type);
        if (!ret) {
                dev_warn(adapter, "%s: could not find filter entry: %u\n",
                         __func__, filter_id);
                return -EINVAL;
        }

        f = &adapter->tids.ftid_tab[filter_id];
        ret = writable_filter(f);
        if (ret)
                return ret;

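        /*
         * Only release the bitmap slots and issue a delete work request
         * if the filter is actually resident in the hardware.
         */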
        if (f->valid) {
                f->ctx = ctx;
                cxgbe_clear_ftid(&adapter->tids,
                                 f->tid - adapter->tids.ftid_base,
                                 f->fs.type ? FILTER_TYPE_IPV6 :
                                              FILTER_TYPE_IPV4);
                return del_filter_wr(dev, filter_id);
        }

        /*
         * If the caller has passed in a Completion Context then we need to
         * mark it as a successful completion so they don't stall waiting
         * for it.
         */
        if (ctx) {
                ctx->result = 0;
                t4_complete(&ctx->completion);
        }

        return 0;
}

/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
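/*
 * Rough usage sketch (illustrative only, not taken from this driver): a
 * caller fills a ch_filter_specification, programs the rule and then waits
 * on the completion embedded in the filter context, which filter_rpl()
 * signals when the firmware reply arrives.  For example:
 *
 *      struct ch_filter_specification fs = { 0 };
 *      struct filter_ctx ctx;
 *
 *      fs.type = 0;                      -- IPv4 rule
 *      fs.action = FILTER_DROP;          -- drop matching packets
 *      fs.val.proto = 17;                -- hypothetical match: UDP
 *      fs.mask.proto = 0xff;
 *      if (!cxgbe_set_filter(dev, filter_id, &fs, &ctx))
 *              wait on ctx.completion    -- completion helper not shown here
 */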
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
                     struct ch_filter_specification *fs,
                     struct filter_ctx *ctx)
{
        struct port_info *pi = ethdev2pinfo(dev);
        struct adapter *adapter = pi->adapter;
        unsigned int fidx, iq, fid_bit = 0;
        struct filter_entry *f;
        int ret;

        if (filter_id >= adapter->tids.nftids)
                return -ERANGE;

        ret = validate_filter(adapter, fs);
        if (ret)
                return ret;

        /*
         * Ensure filter id is aligned on the 4 slot boundary for IPv6
         * maskfull filters.
         */
        if (fs->type)
                filter_id &= ~(0x3);

        ret = is_filter_set(&adapter->tids, filter_id, fs->type);
        if (ret)
                return -EBUSY;

        iq = get_filter_steerq(dev, fs);

        /*
         * IPv6 filters occupy four slots and must be aligned on
         * four-slot boundaries.  IPv4 filters only occupy a single
         * slot and have no alignment requirements but writing a new
         * IPv4 filter into the middle of an existing IPv6 filter
         * requires clearing the old IPv6 filter.
         */
        if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
                /*
                 * If our IPv4 filter isn't being written to a
                 * multiple-of-four filter index and there's a valid IPv6
                 * filter at the multiple-of-4 base slot, then the request
                 * conflicts with that IPv6 filter and must be rejected.
                 */
                fidx = filter_id & ~0x3;
                if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
                        f = &adapter->tids.ftid_tab[fidx];
                        if (f->valid)
                                return -EBUSY;
                }
        } else { /* IPv6 */
                /*
                 * Ensure that the IPv6 filter is aligned on a
                 * multiple of 4 boundary.
                 */
                if (filter_id & 0x3)
                        return -EINVAL;

                /*
                 * Check the remaining slots that this IPv6 filter would
                 * overlap; the base slot itself is checked further below.
                 */
                for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
                        f = &adapter->tids.ftid_tab[fidx];
                        if (f->valid)
                                return -EBUSY;
                }
        }

        /*
         * Check to make sure that provided filter index is not
         * already in use by someone else
         */
        f = &adapter->tids.ftid_tab[filter_id];
        if (f->valid)
                return -EBUSY;

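        /*
         * fidx becomes the absolute hardware filter TID (ftid_base plus the
         * index), while fid_bit indexes the driver's ftid bitmap.
         */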
        fidx = adapter->tids.ftid_base + filter_id;
        fid_bit = filter_id;
        ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
                             fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
        if (ret)
                return ret;

        /*
         * Check to make sure the filter requested is writable ...
         */
        ret = writable_filter(f);
        if (ret) {
                /* Clear the bits we have set above */
                cxgbe_clear_ftid(&adapter->tids, fid_bit,
                                 fs->type ? FILTER_TYPE_IPV6 :
                                            FILTER_TYPE_IPV4);
                return ret;
        }

        /*
         * Convert the filter specification into our internal format and
         * remember the ingress queue and device the filter belongs to.
         */
        f->fs = *fs;
        f->fs.iq = iq;
        f->dev = dev;

        /*
         * Attempt to set the filter.  If we don't succeed, we clear
         * it and return the failure.
         */
        f->ctx = ctx;
        f->tid = fidx; /* Save the actual tid */
        ret = set_filter_wr(dev, filter_id);
        if (ret) {
                fid_bit = f->tid - adapter->tids.ftid_base;
                cxgbe_clear_ftid(&adapter->tids, fid_bit,
                                 fs->type ? FILTER_TYPE_IPV6 :
                                            FILTER_TYPE_IPV4);
                clear_filter(f);
        }

        return ret;
}

/**
 * Handle a LE-TCAM filter write/deletion reply.
 */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
        struct filter_entry *f = NULL;
        unsigned int tid = GET_TID(rpl);
        int idx, max_fidx = adap->tids.nftids;

        /* Get the corresponding filter entry for this tid */
        if (adap->tids.ftid_tab) {
                /* Check this in normal filter region */
                idx = tid - adap->tids.ftid_base;
                if (idx >= max_fidx)
                        return;

                f = &adap->tids.ftid_tab[idx];
                if (f->tid != tid)
                        return;
        }

        /* We found the filter entry for this tid */
        if (f) {
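                /* The firmware echoes the work request status in the reply cookie. */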
                unsigned int ret = G_COOKIE(rpl->cookie);
                struct filter_ctx *ctx;

                /*
                 * Pull off any filter operation context attached to the
                 * filter.
                 */
                ctx = f->ctx;
                f->ctx = NULL;

                if (ret == FW_FILTER_WR_FLT_ADDED) {
                        f->pending = 0;  /* asynchronous setup completed */
                        f->valid = 1;
                        if (ctx) {
                                ctx->tid = f->tid;
                                ctx->result = 0;
                        }
                } else if (ret == FW_FILTER_WR_FLT_DELETED) {
                        /*
                         * Clear the filter when we get confirmation from the
                         * hardware that the filter has been deleted.
                         */
                        clear_filter(f);
                        if (ctx)
                                ctx->result = 0;
                } else {
                        /*
                         * Something went wrong.  Issue a warning about the
                         * problem and clear everything out.
                         */
                        dev_warn(adap, "filter %u setup failed with error %u\n",
                                 idx, ret);
                        clear_filter(f);
                        if (ctx)
                                ctx->result = -EINVAL;
                }

                if (ctx)
                        t4_complete(&ctx->completion);
        }
}

/*
 * Retrieve the packet or byte count for the specified filter.
 */
int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
                           u64 *c, bool get_byte)
{
        struct filter_entry *f;
        unsigned int tcb_base, tcbaddr;
        int ret;

        tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
        if (fidx >= adapter->tids.nftids)
                return -ERANGE;

        f = &adapter->tids.ftid_tab[fidx];
        if (!f->valid)
                return -EINVAL;

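        /*
         * Each TID owns a TCB of TCB_SIZE bytes starting at tcb_base in
         * adapter memory.
         */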
        tcbaddr = tcb_base + f->tid * TCB_SIZE;

        if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
                /*
                 * For T5 and T6, the Filter Packet Hit Count is maintained as
                 * a 32-bit Big Endian value in the TCB field {timestamp}.
                 * Note that instead of the filter hit count showing up at
                 * offset 20 ((W_TCB_TIMESTAMP == 5) * sizeof(u32)), it
                 * actually shows up at offset 24.  Whacky.
                 */
                if (get_byte) {
                        unsigned int word_offset = 4;
                        __be64 be64_byte_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be64_byte_count),
                                           &be64_byte_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = be64_to_cpu(be64_byte_count);
                } else {
                        unsigned int word_offset = 6;
                        __be32 be32_count;

                        t4_os_lock(&adapter->win0_lock);
                        ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
                                           tcbaddr +
                                           (word_offset * sizeof(__be32)),
                                           sizeof(be32_count), &be32_count,
                                           T4_MEMORY_READ);
                        t4_os_unlock(&adapter->win0_lock);
                        if (ret < 0)
                                return ret;
                        *c = (u64)be32_to_cpu(be32_count);
                }
        }
        return 0;
}