net/cxgbe: implement flow query operation
[dpdk.git] / drivers / net / cxgbe / cxgbe_filter.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Chelsio Communications.
3  * All rights reserved.
4  */
5
6 #include "common.h"
7 #include "t4_regs.h"
8 #include "cxgbe_filter.h"
9
10 /**
11  * Validate if the requested filter specification can be set by checking
12  * if the requested features have been enabled
13  */
14 int validate_filter(struct adapter *adapter, struct ch_filter_specification *fs)
15 {
16         u32 fconf;
17
18         /*
19          * Check for unconfigured fields being used.
20          */
21         fconf = adapter->params.tp.vlan_pri_map;
22
23 #define S(_field) \
24         (fs->val._field || fs->mask._field)
25 #define U(_mask, _field) \
26         (!(fconf & (_mask)) && S(_field))
27
28         if (U(F_ETHERTYPE, ethtype) || U(F_PROTOCOL, proto))
29                 return -EOPNOTSUPP;
30
31 #undef S
32 #undef U
33         return 0;
34 }
35
36 /**
37  * Get the queue to which the traffic must be steered to.
38  */
39 static unsigned int get_filter_steerq(struct rte_eth_dev *dev,
40                                       struct ch_filter_specification *fs)
41 {
42         struct port_info *pi = ethdev2pinfo(dev);
43         struct adapter *adapter = pi->adapter;
44         unsigned int iq;
45
46         /*
47          * If the user has requested steering matching Ingress Packets
48          * to a specific Queue Set, we need to make sure it's in range
49          * for the port and map that into the Absolute Queue ID of the
50          * Queue Set's Response Queue.
51          */
52         if (!fs->dirsteer) {
53                 iq = 0;
54         } else {
55                 /*
56                  * If the iq id is greater than the number of qsets,
57                  * then assume it is an absolute qid.
58                  */
59                 if (fs->iq < pi->n_rx_qsets)
60                         iq = adapter->sge.ethrxq[pi->first_qset +
61                                                  fs->iq].rspq.abs_id;
62                 else
63                         iq = fs->iq;
64         }
65
66         return iq;
67 }
68
69 /* Return an error number if the indicated filter isn't writable ... */
70 int writable_filter(struct filter_entry *f)
71 {
72         if (f->locked)
73                 return -EPERM;
74         if (f->pending)
75                 return -EBUSY;
76
77         return 0;
78 }
79
80 /**
81  * Check if entry already filled.
82  */
83 bool is_filter_set(struct tid_info *t, int fidx, int family)
84 {
85         bool result = FALSE;
86         int i, max;
87
88         /* IPv6 requires four slots and IPv4 requires only 1 slot.
89          * Ensure, there's enough slots available.
90          */
91         max = family == FILTER_TYPE_IPV6 ? fidx + 3 : fidx;
92
93         t4_os_lock(&t->ftid_lock);
94         for (i = fidx; i <= max; i++) {
95                 if (rte_bitmap_get(t->ftid_bmap, i)) {
96                         result = TRUE;
97                         break;
98                 }
99         }
100         t4_os_unlock(&t->ftid_lock);
101         return result;
102 }
103
104 /**
105  * Allocate a available free entry
106  */
107 int cxgbe_alloc_ftid(struct adapter *adap, unsigned int family)
108 {
109         struct tid_info *t = &adap->tids;
110         int pos;
111         int size = t->nftids;
112
113         t4_os_lock(&t->ftid_lock);
114         if (family == FILTER_TYPE_IPV6)
115                 pos = cxgbe_bitmap_find_free_region(t->ftid_bmap, size, 4);
116         else
117                 pos = cxgbe_find_first_zero_bit(t->ftid_bmap, size);
118         t4_os_unlock(&t->ftid_lock);
119
120         return pos < size ? pos : -1;
121 }
122
/**
 * Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
void clear_filter(struct filter_entry *f)
{
	/*
	 * The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags etc. so it's all we need for
	 * this operation.
	 *
	 * NOTE(review): no external resources are actually released here;
	 * this entry currently owns none beyond its own fields.
	 */
	memset(f, 0, sizeof(*f));
}
136
/**
 * t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
static void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
	/* WR length is expressed in 16-byte units */
	wr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*wr) / 16));
	/* NOREPLY is set for a negative qid, suppressing the notification */
	wr->tid_to_iq = cpu_to_be32(V_FW_FILTER_WR_TID(ftid) |
				    V_FW_FILTER_WR_NOREPLY(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(F_FW_FILTER_WR_DEL_FILTER);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
				cpu_to_be16(V_FW_FILTER_WR_RX_RPL_IQ(qid));
}
158
159 /**
160  * Create FW work request to delete the filter at a specified index
161  */
162 static int del_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
163 {
164         struct adapter *adapter = ethdev2adap(dev);
165         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
166         struct rte_mbuf *mbuf;
167         struct fw_filter_wr *fwr;
168         struct sge_ctrl_txq *ctrlq;
169         unsigned int port_id = ethdev2pinfo(dev)->port_id;
170
171         ctrlq = &adapter->sge.ctrlq[port_id];
172         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
173         if (!mbuf)
174                 return -ENOMEM;
175
176         mbuf->data_len = sizeof(*fwr);
177         mbuf->pkt_len = mbuf->data_len;
178
179         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
180         t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
181
182         /*
183          * Mark the filter as "pending" and ship off the Filter Work Request.
184          * When we get the Work Request Reply we'll clear the pending status.
185          */
186         f->pending = 1;
187         t4_mgmt_tx(ctrlq, mbuf);
188         return 0;
189 }
190
191 int set_filter_wr(struct rte_eth_dev *dev, unsigned int fidx)
192 {
193         struct adapter *adapter = ethdev2adap(dev);
194         struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
195         struct rte_mbuf *mbuf;
196         struct fw_filter_wr *fwr;
197         struct sge_ctrl_txq *ctrlq;
198         unsigned int port_id = ethdev2pinfo(dev)->port_id;
199         int ret;
200
201         ctrlq = &adapter->sge.ctrlq[port_id];
202         mbuf = rte_pktmbuf_alloc(ctrlq->mb_pool);
203         if (!mbuf) {
204                 ret = -ENOMEM;
205                 goto out;
206         }
207
208         mbuf->data_len = sizeof(*fwr);
209         mbuf->pkt_len = mbuf->data_len;
210
211         fwr = rte_pktmbuf_mtod(mbuf, struct fw_filter_wr *);
212         memset(fwr, 0, sizeof(*fwr));
213
214         /*
215          * Construct the work request to set the filter.
216          */
217         fwr->op_pkd = cpu_to_be32(V_FW_WR_OP(FW_FILTER_WR));
218         fwr->len16_pkd = cpu_to_be32(V_FW_WR_LEN16(sizeof(*fwr) / 16));
219         fwr->tid_to_iq =
220                 cpu_to_be32(V_FW_FILTER_WR_TID(f->tid) |
221                             V_FW_FILTER_WR_RQTYPE(f->fs.type) |
222                             V_FW_FILTER_WR_NOREPLY(0) |
223                             V_FW_FILTER_WR_IQ(f->fs.iq));
224         fwr->del_filter_to_l2tix =
225                 cpu_to_be32(V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
226                             V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
227                             V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
228                             V_FW_FILTER_WR_PRIO(f->fs.prio));
229         fwr->ethtype = cpu_to_be16(f->fs.val.ethtype);
230         fwr->ethtypem = cpu_to_be16(f->fs.mask.ethtype);
231         fwr->smac_sel = 0;
232         fwr->rx_chan_rx_rpl_iq =
233                 cpu_to_be16(V_FW_FILTER_WR_RX_CHAN(0) |
234                             V_FW_FILTER_WR_RX_RPL_IQ(adapter->sge.fw_evtq.abs_id
235                                                      ));
236         fwr->ptcl = f->fs.val.proto;
237         fwr->ptclm = f->fs.mask.proto;
238         rte_memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
239         rte_memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
240         rte_memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
241         rte_memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
242         fwr->lp = cpu_to_be16(f->fs.val.lport);
243         fwr->lpm = cpu_to_be16(f->fs.mask.lport);
244         fwr->fp = cpu_to_be16(f->fs.val.fport);
245         fwr->fpm = cpu_to_be16(f->fs.mask.fport);
246
247         /*
248          * Mark the filter as "pending" and ship off the Filter Work Request.
249          * When we get the Work Request Reply we'll clear the pending status.
250          */
251         f->pending = 1;
252         t4_mgmt_tx(ctrlq, mbuf);
253         return 0;
254
255 out:
256         return ret;
257 }
258
259 /**
260  * Set the corresponding entry in the bitmap. 4 slots are
261  * marked for IPv6, whereas only 1 slot is marked for IPv4.
262  */
263 static int cxgbe_set_ftid(struct tid_info *t, int fidx, int family)
264 {
265         t4_os_lock(&t->ftid_lock);
266         if (rte_bitmap_get(t->ftid_bmap, fidx)) {
267                 t4_os_unlock(&t->ftid_lock);
268                 return -EBUSY;
269         }
270
271         if (family == FILTER_TYPE_IPV4) {
272                 rte_bitmap_set(t->ftid_bmap, fidx);
273         } else {
274                 rte_bitmap_set(t->ftid_bmap, fidx);
275                 rte_bitmap_set(t->ftid_bmap, fidx + 1);
276                 rte_bitmap_set(t->ftid_bmap, fidx + 2);
277                 rte_bitmap_set(t->ftid_bmap, fidx + 3);
278         }
279         t4_os_unlock(&t->ftid_lock);
280         return 0;
281 }
282
283 /**
284  * Clear the corresponding entry in the bitmap. 4 slots are
285  * cleared for IPv6, whereas only 1 slot is cleared for IPv4.
286  */
287 static void cxgbe_clear_ftid(struct tid_info *t, int fidx, int family)
288 {
289         t4_os_lock(&t->ftid_lock);
290         if (family == FILTER_TYPE_IPV4) {
291                 rte_bitmap_clear(t->ftid_bmap, fidx);
292         } else {
293                 rte_bitmap_clear(t->ftid_bmap, fidx);
294                 rte_bitmap_clear(t->ftid_bmap, fidx + 1);
295                 rte_bitmap_clear(t->ftid_bmap, fidx + 2);
296                 rte_bitmap_clear(t->ftid_bmap, fidx + 3);
297         }
298         t4_os_unlock(&t->ftid_lock);
299 }
300
301 /**
302  * Check a delete filter request for validity and send it to the hardware.
303  * Return 0 on success, an error number otherwise.  We attach any provided
304  * filter operation context to the internal filter specification in order to
305  * facilitate signaling completion of the operation.
306  */
307 int cxgbe_del_filter(struct rte_eth_dev *dev, unsigned int filter_id,
308                      struct ch_filter_specification *fs,
309                      struct filter_ctx *ctx)
310 {
311         struct port_info *pi = (struct port_info *)(dev->data->dev_private);
312         struct adapter *adapter = pi->adapter;
313         struct filter_entry *f;
314         int ret;
315
316         if (filter_id >= adapter->tids.nftids)
317                 return -ERANGE;
318
319         ret = is_filter_set(&adapter->tids, filter_id, fs->type);
320         if (!ret) {
321                 dev_warn(adap, "%s: could not find filter entry: %u\n",
322                          __func__, filter_id);
323                 return -EINVAL;
324         }
325
326         f = &adapter->tids.ftid_tab[filter_id];
327         ret = writable_filter(f);
328         if (ret)
329                 return ret;
330
331         if (f->valid) {
332                 f->ctx = ctx;
333                 cxgbe_clear_ftid(&adapter->tids,
334                                  f->tid - adapter->tids.ftid_base,
335                                  f->fs.type ? FILTER_TYPE_IPV6 :
336                                               FILTER_TYPE_IPV4);
337                 return del_filter_wr(dev, filter_id);
338         }
339
340         /*
341          * If the caller has passed in a Completion Context then we need to
342          * mark it as a successful completion so they don't stall waiting
343          * for it.
344          */
345         if (ctx) {
346                 ctx->result = 0;
347                 t4_complete(&ctx->completion);
348         }
349
350         return 0;
351 }
352
/**
 * Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 *
 * @dev: the ethernet device whose filter region is being programmed
 * @filter_id: slot index within [0, adapter->tids.nftids); rounded down
 *             to a multiple of 4 for IPv6 filters
 * @fs: the requested filter specification
 * @ctx: optional completion context, completed asynchronously by
 *       filter_rpl() once the firmware replies
 */
int cxgbe_set_filter(struct rte_eth_dev *dev, unsigned int filter_id,
		     struct ch_filter_specification *fs,
		     struct filter_ctx *ctx)
{
	struct port_info *pi = ethdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	unsigned int fidx, iq, fid_bit = 0;
	struct filter_entry *f;
	int ret;

	if (filter_id >= adapter->tids.nftids)
		return -ERANGE;

	ret = validate_filter(adapter, fs);
	if (ret)
		return ret;

	/*
	 * Ensure filter id is aligned on the 4 slot boundary for IPv6
	 * maskfull filters.
	 */
	if (fs->type)
		filter_id &= ~(0x3);

	ret = is_filter_set(&adapter->tids, filter_id, fs->type);
	if (ret)
		return -EBUSY;

	iq = get_filter_steerq(dev, fs);

	/*
	 * IPv6 filters occupy four slots and must be aligned on
	 * four-slot boundaries.  IPv4 filters only occupy a single
	 * slot and have no alignment requirements but writing a new
	 * IPv4 filter into the middle of an existing IPv6 filter
	 * requires clearing the old IPv6 filter.
	 */
	if (fs->type == FILTER_TYPE_IPV4) { /* IPv4 */
		/*
		 * If our IPv4 filter isn't being written to a
		 * multiple of four filter index and there's an IPv6
		 * filter at the multiple of 4 base slot, then we need
		 * to delete that IPv6 filter ...
		 */
		fidx = filter_id & ~0x3;
		if (fidx != filter_id && adapter->tids.ftid_tab[fidx].fs.type) {
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid)
				return -EBUSY;
		}
	} else { /* IPv6 */
		/*
		 * Ensure that the IPv6 filter is aligned on a
		 * multiple of 4 boundary.
		 */
		if (filter_id & 0x3)
			return -EINVAL;

		/*
		 * Check all except the base overlapping IPv4 filter
		 * slots.
		 */
		for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid)
				return -EBUSY;
		}
	}

	/*
	 * Check to make sure that provided filter index is not
	 * already in use by someone else
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	/* fidx becomes the hardware tid; fid_bit indexes the local bitmap. */
	fidx = adapter->tids.ftid_base + filter_id;
	fid_bit = filter_id;
	ret = cxgbe_set_ftid(&adapter->tids, fid_bit,
			     fs->type ? FILTER_TYPE_IPV6 : FILTER_TYPE_IPV4);
	if (ret)
		return ret;

	/*
	 * Check to make sure the filter requested is writable ...
	 */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgbe_clear_ftid(&adapter->tids, fid_bit,
				 fs->type ? FILTER_TYPE_IPV6 :
					    FILTER_TYPE_IPV4);
		return ret;
	}

	/*
	 * Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 *
	 * NOTE(review): no PF/VF-to-Outer-VLAN copy actually appears
	 * below; the comment looks inherited from a fuller driver --
	 * confirm against upstream before relying on it.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	/*
	 * Attempt to set the filter.  If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(dev, filter_id);
	if (ret) {
		/* Unwind: release the bitmap slots and wipe the entry. */
		fid_bit = f->tid - adapter->tids.ftid_base;
		cxgbe_clear_ftid(&adapter->tids, fid_bit,
				 fs->type ? FILTER_TYPE_IPV6 :
					    FILTER_TYPE_IPV4);
		clear_filter(f);
	}

	return ret;
}
483
484 /**
485  * Handle a LE-TCAM filter write/deletion reply.
486  */
487 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
488 {
489         struct filter_entry *f = NULL;
490         unsigned int tid = GET_TID(rpl);
491         int idx, max_fidx = adap->tids.nftids;
492
493         /* Get the corresponding filter entry for this tid */
494         if (adap->tids.ftid_tab) {
495                 /* Check this in normal filter region */
496                 idx = tid - adap->tids.ftid_base;
497                 if (idx >= max_fidx)
498                         return;
499
500                 f = &adap->tids.ftid_tab[idx];
501                 if (f->tid != tid)
502                         return;
503         }
504
505         /* We found the filter entry for this tid */
506         if (f) {
507                 unsigned int ret = G_COOKIE(rpl->cookie);
508                 struct filter_ctx *ctx;
509
510                 /*
511                  * Pull off any filter operation context attached to the
512                  * filter.
513                  */
514                 ctx = f->ctx;
515                 f->ctx = NULL;
516
517                 if (ret == FW_FILTER_WR_FLT_ADDED) {
518                         f->pending = 0;  /* asynchronous setup completed */
519                         f->valid = 1;
520                         if (ctx) {
521                                 ctx->tid = f->tid;
522                                 ctx->result = 0;
523                         }
524                 } else if (ret == FW_FILTER_WR_FLT_DELETED) {
525                         /*
526                          * Clear the filter when we get confirmation from the
527                          * hardware that the filter has been deleted.
528                          */
529                         clear_filter(f);
530                         if (ctx)
531                                 ctx->result = 0;
532                 } else {
533                         /*
534                          * Something went wrong.  Issue a warning about the
535                          * problem and clear everything out.
536                          */
537                         dev_warn(adap, "filter %u setup failed with error %u\n",
538                                  idx, ret);
539                         clear_filter(f);
540                         if (ctx)
541                                 ctx->result = -EINVAL;
542                 }
543
544                 if (ctx)
545                         t4_complete(&ctx->completion);
546         }
547 }
548
549 /*
550  * Retrieve the packet count for the specified filter.
551  */
552 int cxgbe_get_filter_count(struct adapter *adapter, unsigned int fidx,
553                            u64 *c, bool get_byte)
554 {
555         struct filter_entry *f;
556         unsigned int tcb_base, tcbaddr;
557         int ret;
558
559         tcb_base = t4_read_reg(adapter, A_TP_CMM_TCB_BASE);
560         if (fidx >= adapter->tids.nftids)
561                 return -ERANGE;
562
563         f = &adapter->tids.ftid_tab[fidx];
564         if (!f->valid)
565                 return -EINVAL;
566
567         tcbaddr = tcb_base + f->tid * TCB_SIZE;
568
569         if (is_t5(adapter->params.chip) || is_t6(adapter->params.chip)) {
570                 /*
571                  * For T5, the Filter Packet Hit Count is maintained as a
572                  * 32-bit Big Endian value in the TCB field {timestamp}.
573                  * Similar to the craziness above, instead of the filter hit
574                  * count showing up at offset 20 ((W_TCB_TIMESTAMP == 5) *
575                  * sizeof(u32)), it actually shows up at offset 24.  Whacky.
576                  */
577                 if (get_byte) {
578                         unsigned int word_offset = 4;
579                         __be64 be64_byte_count;
580
581                         t4_os_lock(&adapter->win0_lock);
582                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
583                                            tcbaddr +
584                                            (word_offset * sizeof(__be32)),
585                                            sizeof(be64_byte_count),
586                                            &be64_byte_count,
587                                            T4_MEMORY_READ);
588                         t4_os_unlock(&adapter->win0_lock);
589                         if (ret < 0)
590                                 return ret;
591                         *c = be64_to_cpu(be64_byte_count);
592                 } else {
593                         unsigned int word_offset = 6;
594                         __be32 be32_count;
595
596                         t4_os_lock(&adapter->win0_lock);
597                         ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
598                                            tcbaddr +
599                                            (word_offset * sizeof(__be32)),
600                                            sizeof(be32_count), &be32_count,
601                                            T4_MEMORY_READ);
602                         t4_os_unlock(&adapter->win0_lock);
603                         if (ret < 0)
604                                 return ret;
605                         *c = (u64)be32_to_cpu(be32_count);
606                 }
607         }
608         return 0;
609 }