dacbdd0b44a5d4297005fd2c207eb3ea6d509d0c
[dpdk.git] / drivers / net / octeontx / base / octeontx_pkovf.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4
5 #include <stdbool.h>
6 #include <string.h>
7 #include <stdio.h>
8
9 #include <rte_eal.h>
10 #include <rte_cycles.h>
11 #include <rte_malloc.h>
12 #include <rte_memory.h>
13 #include <rte_bus_pci.h>
14 #include <rte_spinlock.h>
15
16 #include "../octeontx_logs.h"
17 #include "octeontx_io.h"
18 #include "octeontx_pkovf.h"
19
/* A DMA-able memory region: CPU virtual address, device IOVA and
 * length in bytes.  Used for the flow-control counter area.
 */
struct octeontx_pko_iomem {
	uint8_t		*va;
	rte_iova_t	iova;
	size_t		size;
};

#define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}
/* Flags for struct octeontx_pkovf::status */
#define PKO_VALID	0x1	/* VF configured by octeontx_pko_init_fc() */
#define PKO_INUSE	0x2	/* VF handed out by octeontx_pko_get_vfid() */
29
/* One flow-control counter slot per DQ.  The padding keeps consecutive
 * slots PKO_DQ_FC_STRIDE bytes apart — presumably the stride the PKO
 * hardware uses when updating buf_cnt (see the stride-select bit written
 * to PKO_VF_DQ_FC_CONFIG in octeontx_pko_init_fc()).
 */
struct octeontx_pko_fc_ctl_s {
	int64_t buf_cnt;
	int64_t padding[(PKO_DQ_FC_STRIDE / 8) - 1];
};
34
/* Per-VF state captured at PCI probe time. */
struct octeontx_pkovf {
	uint8_t		*bar0;	/* register space: DQ ops, FC config */
	uint8_t		*bar2;	/* exposed as lmtline_va in channel_query */
	uint8_t		status;	/* PKO_VALID / PKO_INUSE flags */
	uint16_t	domain;
	uint16_t	vfid;
};
42
/* Global driver state: every probed PKO VF plus the DQ-to-channel map.
 * dq_map stores the bitwise complement of the owning channel id
 * (~chanid); an entry of 0 (== ~(~0ull)) marks the DQ as free.
 */
struct octeontx_pko_vf_ctl_s {
	rte_spinlock_t lock;	/* serializes dq_map claim/free */
	uint16_t global_domain;
	struct octeontx_pko_iomem fc_iomem;	/* flow-control counter area */
	struct octeontx_pko_fc_ctl_s *fc_ctl;	/* typed view of fc_iomem.va */
	struct octeontx_pkovf pko[PKO_VF_MAX];
	struct {
		uint64_t chanid;	/* ~channel-id; 0 when free */
	} dq_map[PKO_VF_MAX * PKO_VF_NUM_DQ];
};

static struct octeontx_pko_vf_ctl_s pko_vf_ctl;
55
56 static void *
57 octeontx_pko_dq_vf_bar0(uint16_t txq)
58 {
59         int vf_ix;
60
61         vf_ix = txq / PKO_VF_NUM_DQ;
62         return pko_vf_ctl.pko[vf_ix].bar0;
63 }
64
65 static int
66 octeontx_pko_dq_gdq(uint16_t txq)
67 {
68         return txq % PKO_VF_NUM_DQ;
69 }
70
/**
 * Open a PKO DQ.
 *
 * Seeds the software flow-control credit count for @txq, programs the
 * watermark register so DEPTH reports descriptor counts, then issues
 * the OPEN DQ operation via a load-and-add on the op register.
 *
 * Returns the DEPTH reported by the OPEN response, -EINVAL for an
 * invalid txq, or -EIO when the response op/status fields are not the
 * expected OPEN/PASS (or DQALREADYCREATED) values.
 */
static inline
int octeontx_pko_dq_open(uint16_t txq)
{
	unsigned int reg_off;
	uint8_t *vf_bar0;
	uint64_t rtn;
	int gdq;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	gdq = octeontx_pko_dq_gdq(txq);

	if (unlikely(gdq < 0 || vf_bar0 == NULL))
		return -EINVAL;
	/* Initial credits: full depth minus the skid headroom. */
	*(volatile int64_t*)(pko_vf_ctl.fc_ctl + txq) =
		PKO_DQ_FC_DEPTH_PAGES - PKO_DQ_FC_SKID;

	/* Credits must be globally visible before enabling the DQ. */
	rte_wmb();

	octeontx_write64(PKO_DQ_FC_DEPTH_PAGES,
			 vf_bar0 + PKO_VF_DQ_FC_STATUS(gdq));

	/* Set the register to return descriptor (packet) count as DEPTH */
	/* KIND=1, NCB_QUERY_RSP=0 */
	octeontx_write64(1ull << PKO_DQ_KIND_BIT,
				vf_bar0 + PKO_VF_DQ_WM_CTL(gdq));
	reg_off = PKO_VF_DQ_OP_OPEN(gdq);

	rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

	/* PKO_DQOP_E::OPEN */
	if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x1)
		return -EIO;

	switch (rtn >> PKO_DQ_STATUS_BIT) {
	case 0xC:	/* DQALREADYCREATED */
	case 0x0:	/* PASS */
		break;
	default:
		return -EIO;
	}

	/* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
	octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

	return rtn & ((1ull << PKO_DQ_OP_BIT) - 1);
}
120
/**
 * Close a PKO DQ
 * Flush all packets pending.
 *
 * Issues the CLOSE DQ operation via load-and-add on the op register.
 * Returns the remaining DEPTH from the CLOSE response, -EINVAL for an
 * invalid txq, or -EIO when the response op/status fields are not the
 * expected CLOSE/PASS (or DQNOTCREATED) values.
 */
static inline
int octeontx_pko_dq_close(uint16_t txq)
{
	unsigned int reg_off;
	uint8_t *vf_bar0;
	uint64_t rtn;
	int res;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	res = octeontx_pko_dq_gdq(txq);	/* VF-local DQ; reused for DEPTH below */

	if (unlikely(res < 0 || vf_bar0 == NULL))
		return -EINVAL;

	reg_off = PKO_VF_DQ_OP_CLOSE(res);

	rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

	/* PKO_DQOP_E::CLOSE */
	if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x2)
		return -EIO;

	switch (rtn >> PKO_DQ_STATUS_BIT) {
	case 0xD:	/* DQNOTCREATED */
	case 0x0:	/* PASS */
		break;
	default:
		return -EIO;
	}

	res = rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */
	return res;
}
158
159 /* Flush all packets pending on a DQ */
160 static inline
161 int octeontx_pko_dq_drain(uint16_t txq)
162 {
163         unsigned int gdq;
164         uint8_t *vf_bar0;
165         uint64_t reg;
166         int res, timo = PKO_DQ_DRAIN_TO;
167
168         vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
169         res = octeontx_pko_dq_gdq(txq);
170         gdq = res;
171
172          /* DRAIN=1, DRAIN_NULL_LINK=0, SW_XOFF=1 */
173          octeontx_write64(0x3, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
174         /* Wait until buffers leave DQs */
175         reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
176         while (reg && timo > 0) {
177                 rte_delay_us(100);
178                 timo--;
179                 reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
180         }
181         /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
182         octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
183
184         return reg;
185 }
186
187 static inline int
188 octeontx_pko_dq_range_lookup(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
189                              unsigned int dq_num, unsigned int dq_from)
190 {
191         unsigned int dq, dq_cnt;
192         unsigned int dq_base;
193
194         dq_cnt = 0;
195         dq = dq_from;
196         while (dq < RTE_DIM(ctl->dq_map)) {
197                 dq_base = dq;
198                 dq_cnt = 0;
199                 while (ctl->dq_map[dq].chanid == ~chanid &&
200                         dq < RTE_DIM(ctl->dq_map)) {
201                         dq_cnt++;
202                         if (dq_cnt == dq_num)
203                                 return dq_base;
204                         dq++;
205                 }
206                 dq++;
207         }
208         return -1;
209 }
210
211 static inline void
212 octeontx_pko_dq_range_assign(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
213                              unsigned int dq_base, unsigned int dq_num)
214 {
215         unsigned int dq, dq_cnt;
216
217         dq_cnt = 0;
218         while (dq_cnt < dq_num) {
219                 dq = dq_base + dq_cnt;
220
221                 octeontx_log_dbg("DQ# %u assigned to CHAN# %" PRIx64 "", dq,
222                         chanid);
223
224                 ctl->dq_map[dq].chanid = ~chanid;
225                 dq_cnt++;
226         }
227 }
228
229 static inline int
230 octeontx_pko_dq_claim(struct octeontx_pko_vf_ctl_s *ctl, unsigned int dq_base,
231                       unsigned int dq_num, uint64_t chanid)
232 {
233         const uint64_t null_chanid = ~0ull;
234         int dq;
235
236         rte_spinlock_lock(&ctl->lock);
237
238         dq = octeontx_pko_dq_range_lookup(ctl, null_chanid, dq_num, dq_base);
239         if (dq < 0 || (unsigned int)dq != dq_base) {
240                 rte_spinlock_unlock(&ctl->lock);
241                 return -1;
242         }
243         octeontx_pko_dq_range_assign(ctl, chanid, dq_base, dq_num);
244
245         rte_spinlock_unlock(&ctl->lock);
246
247         return 0;
248 }
249
250 static inline int
251 octeontx_pko_dq_free(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
252 {
253         const uint64_t null_chanid = ~0ull;
254         unsigned int dq = 0, dq_cnt = 0;
255
256         rte_spinlock_lock(&ctl->lock);
257         while (dq < RTE_DIM(ctl->dq_map)) {
258                 if (ctl->dq_map[dq].chanid == ~chanid) {
259                         ctl->dq_map[dq].chanid = ~null_chanid;
260                         dq_cnt++;
261                 }
262                 dq++;
263         }
264         rte_spinlock_unlock(&ctl->lock);
265
266         return dq_cnt > 0 ? 0 : -EINVAL;
267 }
268
269 int
270 octeontx_pko_channel_open(int dq_base, int dq_num, int chanid)
271 {
272         struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
273         int res;
274
275         res = octeontx_pko_dq_claim(ctl, dq_base, dq_num, chanid);
276         if (res < 0)
277                 return -1;
278
279         return 0;
280 }
281
282 int
283 octeontx_pko_channel_close(int chanid)
284 {
285         struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
286         int res;
287
288         res = octeontx_pko_dq_free(ctl, chanid);
289         if (res < 0)
290                 return -1;
291
292         return 0;
293 }
294
295 static inline int
296 octeontx_pko_chan_start(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
297 {
298         unsigned int dq_vf;
299         unsigned int dq, dq_cnt;
300
301         dq_cnt = 0;
302         dq = 0;
303         while (dq < RTE_DIM(ctl->dq_map)) {
304                 dq_vf = dq / PKO_VF_NUM_DQ;
305
306                 if (!ctl->pko[dq_vf].bar0) {
307                         dq += PKO_VF_NUM_DQ;
308                         continue;
309                 }
310
311                 if (ctl->dq_map[dq].chanid != ~chanid) {
312                         dq++;
313                         continue;
314                 }
315
316                 if (octeontx_pko_dq_open(dq) < 0)
317                         break;
318
319                 dq_cnt++;
320                 dq++;
321         }
322
323         return dq_cnt;
324 }
325
326 int
327 octeontx_pko_channel_start(int chanid)
328 {
329         struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
330         int dq_cnt;
331
332         dq_cnt = octeontx_pko_chan_start(ctl, chanid);
333         if (dq_cnt < 0)
334                 return -1;
335
336         return dq_cnt;
337 }
338
339 static inline int
340 octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
341 {
342         unsigned int dq, dq_cnt, dq_vf;
343         int res;
344
345         dq_cnt = 0;
346         dq = 0;
347         while (dq < RTE_DIM(ctl->dq_map)) {
348                 dq_vf = dq / PKO_VF_NUM_DQ;
349
350                 if (!ctl->pko[dq_vf].bar0) {
351                         dq += PKO_VF_NUM_DQ;
352                         continue;
353                 }
354
355                 if (ctl->dq_map[dq].chanid != ~chanid) {
356                         dq++;
357                         continue;
358                 }
359
360                 res = octeontx_pko_dq_drain(dq);
361                 if (res > 0)
362                         octeontx_log_err("draining DQ%d, buffers left: %x",
363                                          dq, res);
364
365                 res = octeontx_pko_dq_close(dq);
366                 if (res < 0)
367                         octeontx_log_err("closing DQ%d failed\n", dq);
368
369                 dq_cnt++;
370                 dq++;
371         }
372         return dq_cnt;
373 }
374
375 int
376 octeontx_pko_channel_stop(int chanid)
377 {
378         struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
379
380         octeontx_pko_chan_stop(ctl, chanid);
381         return 0;
382 }
383
384 static inline int
385 octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
386                            void *out, size_t out_elem_size,
387                            size_t dq_num, octeontx_pko_dq_getter_t getter)
388 {
389         octeontx_dq_t curr;
390         unsigned int dq_vf;
391         unsigned int dq;
392
393         RTE_SET_USED(out_elem_size);
394         memset(&curr, 0, sizeof(octeontx_dq_t));
395
396         dq_vf = dq_num / PKO_VF_NUM_DQ;
397         dq = dq_num % PKO_VF_NUM_DQ;
398
399         if (!ctl->pko[dq_vf].bar0)
400                 return -EINVAL;
401
402         if (ctl->dq_map[dq_num].chanid != ~chanid)
403                 return -EINVAL;
404
405         uint8_t *iter = (uint8_t *)out;
406         curr.lmtline_va = ctl->pko[dq_vf].bar2;
407         curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
408                 + PKO_VF_DQ_OP_SEND((dq), 0));
409         curr.fc_status_va = ctl->fc_ctl + dq_num;
410
411         octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
412                          curr.lmtline_va, curr.ioreg_va,
413                          curr.fc_status_va);
414
415         getter(&curr, (void *)iter);
416         return 0;
417 }
418
419 int
420 octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
421                                 size_t dq_num, octeontx_pko_dq_getter_t getter)
422 {
423         struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
424         int dq_cnt;
425
426         dq_cnt = octeontx_pko_channel_query(ctl, chanid, out, out_elem_size,
427                                                 dq_num, getter);
428         if (dq_cnt < 0)
429                 return -1;
430
431         return dq_cnt;
432 }
433
434 int
435 octeontx_pko_vf_count(void)
436 {
437         uint16_t global_domain = octeontx_get_global_domain();
438         int vf_cnt;
439
440         pko_vf_ctl.global_domain = global_domain;
441         vf_cnt = 0;
442         while (pko_vf_ctl.pko[vf_cnt].bar0)
443                 vf_cnt++;
444
445         return vf_cnt;
446 }
447
448 size_t
449 octeontx_pko_get_vfid(void)
450 {
451         size_t vf_cnt = octeontx_pko_vf_count();
452         size_t vf_idx;
453
454
455         for (vf_idx = 0; vf_idx < vf_cnt; vf_idx++) {
456                 if (!(pko_vf_ctl.pko[vf_idx].status & PKO_VALID))
457                         continue;
458                 if (pko_vf_ctl.pko[vf_idx].status & PKO_INUSE)
459                         continue;
460
461                 pko_vf_ctl.pko[vf_idx].status |= PKO_INUSE;
462                 return pko_vf_ctl.pko[vf_idx].vfid;
463         }
464
465         return SIZE_MAX;
466 }
467
/**
 * Allocate the per-DQ flow-control counter area and program every VF's
 * PKO_VF_DQ_FC_CONFIG register with the IOVA of its slice (plus
 * HYST_BITS, the stride select and ENABLE).  VFs belonging to another
 * domain are skipped.
 *
 * Returns 0 on success, -ENOMEM when the area cannot be allocated.
 */
int
octeontx_pko_init_fc(const size_t pko_vf_count)
{
	int dq_ix;
	uint64_t reg;
	uint8_t *vf_bar0;
	size_t vf_idx;
	size_t fc_mem_size;

	fc_mem_size = sizeof(struct octeontx_pko_fc_ctl_s) *
			pko_vf_count * PKO_VF_NUM_DQ;

	/* 128-byte alignment: the BASE field written below masks off the
	 * low 7 bits of the IOVA. */
	pko_vf_ctl.fc_iomem.va = rte_malloc(NULL, fc_mem_size, 128);
	if (unlikely(!pko_vf_ctl.fc_iomem.va)) {
		octeontx_log_err("fc_iomem: not enough memory");
		return -ENOMEM;
	}

	/* NOTE(review): rte_malloc_virt2iova() can return RTE_BAD_IOVA;
	 * the result is not checked here — confirm it cannot fail for
	 * rte_malloc'd memory in this setup. */
	pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2iova((void *)
							pko_vf_ctl.fc_iomem.va);
	pko_vf_ctl.fc_iomem.size = fc_mem_size;

	pko_vf_ctl.fc_ctl =
		(struct octeontx_pko_fc_ctl_s *)pko_vf_ctl.fc_iomem.va;

	/* Configure Flow-Control feature for all DQs of open VFs */
	for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
		if (pko_vf_ctl.pko[vf_idx].domain != pko_vf_ctl.global_domain)
			continue;

		/* Each VF's slice starts at its vfid's first DQ slot. */
		dq_ix = pko_vf_ctl.pko[vf_idx].vfid * PKO_VF_NUM_DQ;
		vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;

		reg = (pko_vf_ctl.fc_iomem.iova +
			(sizeof(struct octeontx_pko_fc_ctl_s) * dq_ix)) & ~0x7F;
		reg |=			/* BASE */
		    (0x2 << 3) |	/* HYST_BITS */
		    (((PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16) ? 1 : 0) << 2) |
		    (0x1 << 0);		/* ENABLE */

		octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);
		pko_vf_ctl.pko[vf_idx].status = PKO_VALID;

		octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "",
				 vf_bar0, (int)vf_idx, reg);
	}
	return 0;
}
516
517 void
518 octeontx_pko_fc_free(void)
519 {
520         rte_free(pko_vf_ctl.fc_iomem.va);
521 }
522
523 static void
524 octeontx_pkovf_setup(void)
525 {
526         static bool init_once;
527
528         if (!init_once) {
529                 unsigned int i;
530
531                 rte_spinlock_init(&pko_vf_ctl.lock);
532
533                 pko_vf_ctl.fc_iomem = PKO_IOMEM_NULL;
534                 pko_vf_ctl.fc_ctl = NULL;
535
536                 for (i = 0; i < PKO_VF_MAX; i++) {
537                         pko_vf_ctl.pko[i].bar0 = NULL;
538                         pko_vf_ctl.pko[i].bar2 = NULL;
539                         pko_vf_ctl.pko[i].domain = ~(uint16_t)0;
540                         pko_vf_ctl.pko[i].vfid = ~(uint16_t)0;
541                 }
542
543                 for (i = 0; i < (PKO_VF_MAX * PKO_VF_NUM_DQ); i++)
544                         pko_vf_ctl.dq_map[i].chanid = 0;
545
546                 init_once = true;
547         }
548 }
549
550 /* PKOVF pcie device*/
551 static int
552 pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
553 {
554         uint64_t val;
555         uint16_t vfid;
556         uint16_t domain;
557         uint8_t *bar0;
558         uint8_t *bar2;
559         static uint8_t vf_cnt;
560         struct octeontx_pkovf *res;
561
562         RTE_SET_USED(pci_drv);
563
564         /* For secondary processes, the primary has done all the work */
565         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
566                 return 0;
567
568         if (pci_dev->mem_resource[0].addr == NULL ||
569             pci_dev->mem_resource[2].addr == NULL) {
570                 octeontx_log_err("Empty bars %p %p",
571                         pci_dev->mem_resource[0].addr,
572                         pci_dev->mem_resource[2].addr);
573                 return -ENODEV;
574         }
575         bar0 = pci_dev->mem_resource[0].addr;
576         bar2 = pci_dev->mem_resource[2].addr;
577
578         octeontx_pkovf_setup();
579
580         /* get vfid and domain */
581         val = octeontx_read64(bar0 + PKO_VF_DQ_FC_CONFIG);
582         domain = (val >> 7) & 0xffff;
583         vfid = (val >> 23) & 0xffff;
584
585         if (unlikely(vfid >= PKO_VF_MAX)) {
586                 octeontx_log_err("pko: Invalid vfid %d", vfid);
587                 return -EINVAL;
588         }
589
590         res = &pko_vf_ctl.pko[vf_cnt++];
591         res->vfid = vfid;
592         res->domain = domain;
593         res->bar0 = bar0;
594         res->bar2 = bar2;
595
596         octeontx_log_dbg("Domain=%d group=%d", res->domain, res->vfid);
597         return 0;
598 }
599
#define PCI_VENDOR_ID_CAVIUM               0x177D
#define PCI_DEVICE_ID_OCTEONTX_PKO_VF      0xA049

/* PCI IDs handled by this driver; the zero vendor_id entry terminates
 * the table. */
static const struct rte_pci_id pci_pkovf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
				PCI_DEVICE_ID_OCTEONTX_PKO_VF)
	},
	{
		.vendor_id = 0,
	},
};
612
/* Driver definition: BARs must be mapped before pkovf_probe() runs. */
static struct rte_pci_driver pci_pkovf = {
	.id_table = pci_pkovf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = pkovf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_pkovf, pci_pkovf);