4 * Copyright (C) Cavium Inc. 2017. All Rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Cavium networks nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <rte_atomic.h>
45 #include <rte_errno.h>
46 #include <rte_memory.h>
47 #include <rte_malloc.h>
48 #include <rte_spinlock.h>
51 #include <rte_pmd_octeontx_ssovf.h>
52 #include "octeontx_fpavf.h"
54 /* FPA Mbox Message */
/*
 * Opcodes carried in octeontx_mbox_hdr.msg for requests sent to the FPA PF
 * driver via octeontx_ssovf_mbox_send() (see pool_setup/aura_attach below).
 */
57 #define FPA_CONFIGSET 0x1	/* write pool/aura configuration */
58 #define FPA_CONFIGGET 0x2	/* read back pool/aura configuration */
59 #define FPA_START_COUNT 0x3	/* enable buffer accounting for a pool */
60 #define FPA_STOP_COUNT 0x4	/* disable buffer accounting */
61 #define FPA_ATTACHAURA 0x5	/* bind an aura to a pool */
62 #define FPA_DETACHAURA 0x6	/* unbind an aura from a pool */
63 #define FPA_SETAURALVL 0x7	/* set aura threshold level */
64 #define FPA_GETAURALVL 0x8	/* get aura threshold level */
/* Coprocessor id placed in octeontx_mbox_hdr.coproc to address the FPA block. */
66 #define FPA_COPROC 0x1
/*
 * Mailbox payload exchanged with the FPA PF for pool configuration.
 * NOTE(review): this capture is fragmentary — several struct members and the
 * closing braces are not visible here; do not infer the full wire layout.
 */
69 struct octeontx_mbox_fpa_cfg {
/* Physical start/end of the PF-managed pool stack memory
 * (filled in octeontx_fpapf_pool_setup() from rte_malloc_virt2phy()). */
72 uint64_t pool_stack_base;
73 uint64_t pool_stack_end;
/* Packed request/response wire formats for the PF mailbox protocol.
 * Bodies are not visible in this capture. */
77 struct __attribute__((__packed__)) gen_req {
81 struct __attribute__((__packed__)) idn_req {
85 struct __attribute__((__packed__)) gen_resp {
90 struct __attribute__((__packed__)) dcfg_resp {
96 uint8_t net_port_count;
97 uint8_t virt_port_count;
/* Hardware limits / geometry used by the PF-side stack sizing below. */
100 #define FPA_MAX_POOL 32
101 #define FPA_PF_PAGE_SZ 4096
/* FPA line size in bytes; buf_size/buf_offset are expressed in these units. */
103 #define FPA_LN_SIZE 128
/*
 * Round @x up to the next multiple of @size; @size must be a power of two.
 * Fix: the original expansion used bare `size-1`, so a non-trivial argument
 * expression (e.g. a conditional) would expand with the wrong precedence.
 * Every use of a macro argument must be parenthesized (CERT PRE01-C).
 */
#define FPA_ROUND_UP(x, size) \
	((((unsigned long)(x)) + (size) - 1) & (~((size) - 1)))
/* Convert an object size in bytes to/from 128-byte cache-line units
 * (the `>> 7` / `<< 7` pair encodes the 128-byte line size). */
106 #define FPA_OBJSZ_2_CACHE_LINE(sz) (((sz) + RTE_CACHE_LINE_MASK) >> 7)
107 #define FPA_CACHE_LINE_2_OBJSZ(sz) ((sz) << 7)
/* Bit fields of the pool configuration register assembled in
 * octeontx_fpapf_pool_setup() (enable, natural alignment, types,
 * buffer offset and buffer size in FPA line units). */
109 #define POOL_ENA (0x1 << 0)
110 #define POOL_DIS (0x0 << 0)
111 #define POOL_SET_NAT_ALIGN (0x1 << 1)
112 #define POOL_DIS_NAT_ALIGN (0x0 << 1)
113 #define POOL_STYPE(x) (((x) & 0x1) << 2)
114 #define POOL_LTYPE(x) (((x) & 0x3) << 3)
115 #define POOL_BUF_OFFSET(x) (((x) & 0x7fffULL) << 16)
116 #define POOL_BUF_SIZE(x) (((x) & 0x7ffULL) << 32)
/* Per-VF (per-gpool) bookkeeping; fields visible in this capture: */
119 void *pool_stack_base;	/* VA of PF stack memory, owned/freed by this driver */
121 uint64_t stack_ln_ptr;	/* stack line pointer read from HW in identify() */
123 uint16_t vf_id; /* gpool_id */
124 uint16_t sz128; /* Block size in cache lines */
/* Driver-global state: one slot per possible FPA VF plus a lock that
 * callers of the *_alloc/_free helpers below must hold. */
128 struct octeontx_fpadev {
130 uint8_t total_gpool_cnt;
131 struct fpavf_res pool[FPA_VF_MAX];
/* Single process-wide instance, initialized in octeontx_fpavf_setup(). */
134 static struct octeontx_fpadev fpadev;
136 /* lock is taken by caller */
/*
 * Find a free gpool slot able to hold objects of @object_size bytes.
 * Scans fpadev.pool[] for an entry that is mapped (bar0 != NULL), not in
 * use, and whose sz128 is still unset. Caller holds fpadev.lock.
 * NOTE(review): return statements are not visible in this capture;
 * presumably returns the gpool index on success and negative on failure
 * (callers test `res < 0`) — confirm against the full source.
 */
138 octeontx_fpa_gpool_alloc(unsigned int object_size)
140 struct fpavf_res *res = NULL;
/* Requested block size expressed in 128-byte cache lines. */
144 sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);
146 for (gpool = 0; gpool < FPA_VF_MAX; gpool++) {
148 /* Skip VF that is not mapped Or _inuse */
149 if ((fpadev.pool[gpool].bar0 == NULL) ||
150 (fpadev.pool[gpool].is_inuse == true))
153 res = &fpadev.pool[gpool];
/* identify() must have populated this slot before it can be allocated. */
155 RTE_ASSERT(res->domain_id != (uint16_t)~0);
156 RTE_ASSERT(res->vf_id != (uint16_t)~0);
157 RTE_ASSERT(res->stack_ln_ptr != 0);
/* sz128 == 0 means the slot has no block size assigned yet, i.e. free. */
159 if (res->sz128 == 0) {
162 fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128);
170 /* lock is taken by caller */
/*
 * Build an opaque pool handle: the VF BAR0 address with the gpool index
 * OR-ed into the low bits (callers mask with FPA_GPOOL_MASK to split it).
 * Caller holds fpadev.lock.
 */
171 static __rte_always_inline uintptr_t
172 octeontx_fpa_gpool2handle(uint16_t gpool)
174 struct fpavf_res *res = NULL;
176 RTE_ASSERT(gpool < FPA_VF_MAX);
178 res = &fpadev.pool[gpool];
/* NOTE(review): &fpadev.pool[gpool] can never be NULL — this check is
 * dead code; the meaningful validity check is bar0 != NULL. */
179 if (unlikely(res == NULL))
182 return (uintptr_t)res->bar0 | gpool;
/*
 * Validate an opaque pool handle: non-zero, its BAR portion must match a
 * mapped VF's bar0, and that slot must be fully configured (sz128 set,
 * domain_id valid, stack_ln_ptr non-zero).
 */
185 static __rte_always_inline bool
186 octeontx_fpa_handle_valid(uintptr_t handle)
188 struct fpavf_res *res = NULL;
193 if (unlikely(!handle))
197 gpool = octeontx_fpa_bufpool_gpool(handle);
199 /* get the bar address */
200 handle &= ~(uint64_t)FPA_GPOOL_MASK;
/* Linear search for the VF whose BAR0 matches the handle's BAR bits. */
201 for (i = 0; i < FPA_VF_MAX; i++) {
202 if ((uintptr_t)fpadev.pool[i].bar0 != handle)
209 res = &fpadev.pool[i];
/* A matching BAR is not enough — the slot must be a configured pool. */
211 if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
212 res->stack_ln_ptr == 0)
/*
 * Configure pool @gpool on the PF side: allocate the pool stack memory,
 * then send an FPA_CONFIGSET mailbox request describing buffer size,
 * offset and the stack's physical address range. Marks the slot in-use
 * on success. buf_size/buf_offset are converted to FPA_LN_SIZE units
 * before being packed into the pool config register.
 */
223 octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
224 signed short buf_offset, unsigned int max_buf_count)
227 phys_addr_t phys_addr;
229 struct fpavf_res *fpa = NULL;
231 struct octeontx_mbox_hdr hdr;
232 struct dcfg_resp resp;
233 struct octeontx_mbox_fpa_cfg cfg;
236 fpa = &fpadev.pool[gpool];
/* Stack size: one line-pointer slot per stack_ln_ptr buffers, rounded to
 * the FPA line size. */
237 memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
240 /* Round-up to page size */
241 memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ-1);
242 memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
243 if (memptr == NULL) {
248 /* Configure stack */
249 fpa->pool_stack_base = memptr;
/* The PF needs physical addresses for the stack range. */
250 phys_addr = rte_malloc_virt2phy(memptr);
252 buf_size /= FPA_LN_SIZE;
255 hdr.coproc = FPA_COPROC;
256 hdr.msg = FPA_CONFIGSET;
257 hdr.vfid = fpa->vf_id;
260 buf_offset /= FPA_LN_SIZE;
/* Pack the pool configuration register fields. */
261 reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
262 POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
267 cfg.pool_stack_base = phys_addr;
268 cfg.pool_stack_end = phys_addr + memsz;
269 cfg.aura_cfg = (1 << 9);
271 ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
272 sizeof(struct octeontx_mbox_fpa_cfg),
273 &resp, sizeof(resp));
279 fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64" aura_cfg %" PRIx64 "\n",
280 fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
281 cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);
283 /* Now pool is in_use */
284 fpa->is_inuse = true;
/*
 * Tear down pool @gpool_index on the PF side via an FPA_CONFIGSET request
 * with a zeroed stack range, then release the locally owned pool stack
 * memory regardless of the mailbox outcome.
 */
294 octeontx_fpapf_pool_destroy(unsigned int gpool_index)
296 struct octeontx_mbox_hdr hdr;
297 struct dcfg_resp resp;
298 struct octeontx_mbox_fpa_cfg cfg;
299 struct fpavf_res *fpa = NULL;
302 fpa = &fpadev.pool[gpool_index];
304 hdr.coproc = FPA_COPROC;
305 hdr.msg = FPA_CONFIGSET;
306 hdr.vfid = fpa->vf_id;
309 /* reset and free the pool */
312 cfg.pool_stack_base = 0;
313 cfg.pool_stack_end = 0;
316 ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
317 sizeof(struct octeontx_mbox_fpa_cfg),
318 &resp, sizeof(resp));
326 /* in any case, free the pool stack memory (owned by pool_setup) */
327 rte_free(fpa->pool_stack_base);
328 fpa->pool_stack_base = NULL;
/*
 * Attach aura @gpool_index to pool @gpool_index via an FPA_ATTACHAURA
 * mailbox request (the driver uses a 1:1 aura-to-pool mapping — aid and
 * vfid are both the gpool index). Rejects out-of-range indices.
 */
333 octeontx_fpapf_aura_attach(unsigned int gpool_index)
335 struct octeontx_mbox_hdr hdr;
336 struct dcfg_resp resp;
337 struct octeontx_mbox_fpa_cfg cfg;
340 if (gpool_index >= FPA_MAX_POOL) {
344 hdr.coproc = FPA_COPROC;
345 hdr.msg = FPA_ATTACHAURA;
346 hdr.vfid = gpool_index;
348 memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
349 cfg.aid = gpool_index; /* gpool is gaura (1:1 mapping) */
351 ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
352 sizeof(struct octeontx_mbox_fpa_cfg),
353 &resp, sizeof(resp));
355 fpavf_log_err("Could not attach fpa ");
356 fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
357 gpool_index, gpool_index, ret, hdr.res_code);
/*
 * Detach aura @gpool_index from its pool via an FPA_DETACHAURA mailbox
 * request (no response payload expected). Rejects out-of-range indices.
 */
366 octeontx_fpapf_aura_detach(unsigned int gpool_index)
368 struct octeontx_mbox_fpa_cfg cfg = {0};
369 struct octeontx_mbox_hdr hdr = {0};
372 if (gpool_index >= FPA_MAX_POOL) {
377 cfg.aid = gpool_index; /* gpool is gaura */
378 hdr.coproc = FPA_COPROC;
379 hdr.msg = FPA_DETACHAURA;
380 hdr.vfid = gpool_index;
381 ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
383 fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
384 gpool_index, ret, hdr.res_code);
/*
 * Program the VF-side pool address range registers: write the memory
 * region [memva, memva + memsz) (end truncated down to a cache line)
 * into VHPOOL_START/END for @gpool through the VF BAR in @handle.
 */
393 octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz,
394 void *memva, uint16_t gpool)
398 if (unlikely(!handle))
401 va_end = (uintptr_t)memva + memsz;
402 va_end &= ~RTE_CACHE_LINE_MASK;
405 fpavf_write64((uintptr_t)memva,
406 (void *)((uintptr_t)handle +
407 FPA_VF_VHPOOL_START_ADDR(gpool)));
408 fpavf_write64(va_end,
409 (void *)((uintptr_t)handle +
410 FPA_VF_VHPOOL_END_ADDR(gpool)));
/*
 * Ask the PF to start buffer-count accounting for pool @gpool_index
 * (FPA_START_COUNT, no payload). Rejects out-of-range indices.
 */
415 octeontx_fpapf_start_count(uint16_t gpool_index)
418 struct octeontx_mbox_hdr hdr = {0};
420 if (gpool_index >= FPA_MAX_POOL) {
425 hdr.coproc = FPA_COPROC;
426 hdr.msg = FPA_START_COUNT;
427 hdr.vfid = gpool_index;
428 ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
430 fpavf_log_err("Could not start buffer counting for ");
431 fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
432 gpool_index, ret, hdr.res_code);
/*
 * Mark gpool slot @gpool as no longer in use (bounds-checked).
 * Purely local bookkeeping; no hardware access.
 */
441 static __rte_always_inline int
442 octeontx_fpavf_free(unsigned int gpool)
446 if (gpool >= FPA_MAX_POOL) {
452 fpadev.pool[gpool].is_inuse = false;
/*
 * Release the block-size reservation on gpool slot @gpool by clearing
 * sz128 (the "slot is allocated" marker used by gpool_alloc).
 */
458 static __rte_always_inline int
459 octeontx_gpool_free(uint16_t gpool)
461 if (fpadev.pool[gpool].sz128 != 0) {
462 fpadev.pool[gpool].sz128 = 0;
469 * Return buffer size for a given pool
/* Validates @handle, then converts the slot's sz128 (block size in
 * 128-byte cache lines) back to bytes. */
472 octeontx_fpa_bufpool_block_size(uintptr_t handle)
474 struct fpavf_res *res = NULL;
477 if (unlikely(!octeontx_fpa_handle_valid(handle)))
481 gpool = octeontx_fpa_bufpool_gpool(handle);
482 res = &fpadev.pool[gpool];
483 return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
/*
 * Return the number of buffers currently available from the pool behind
 * @handle: the smaller of what the pool holds (VHPOOL_AVAILABLE) and the
 * aura's remaining allocation budget (CNT_LIMIT - CNT).
 */
487 octeontx_fpa_bufpool_free_count(uintptr_t handle)
489 uint64_t cnt, limit, avail;
493 if (unlikely(!octeontx_fpa_handle_valid(handle)))
497 gpool = octeontx_fpa_bufpool_gpool(handle);
499 /* Get pool bar address from handle */
500 pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
502 cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
503 FPA_VF_VHAURA_CNT(gpool)));
504 limit = fpavf_read64((void *)((uintptr_t)pool_bar +
505 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
507 avail = fpavf_read64((void *)((uintptr_t)pool_bar +
508 FPA_VF_VHPOOL_AVAILABLE(gpool)));
510 return RTE_MIN(avail, (limit - cnt));
/*
 * Create a buffer pool of @object_count objects of @object_size bytes
 * over caller-provided memory (*va_start). Flow, under fpadev.lock:
 * allocate a gpool slot, derive/validate its handle, configure the PF
 * side, attach the aura, program the VF pool range, then (unlocked)
 * seed the aura counters and start PF accounting. Unwinds each step in
 * reverse order on failure and returns NULL.
 */
514 octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
515 unsigned int buf_offset, char **va_start,
521 uintptr_t gpool_handle;
525 RTE_SET_USED(node_id);
/* An rte_mbuf must fit in the reserved head room of each buffer. */
526 FPAVF_STATIC_ASSERTION(sizeof(struct rte_mbuf) <=
527 OCTEONTX_FPAVF_BUF_OFFSET);
529 if (unlikely(*va_start == NULL))
532 object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
533 if (object_size > FPA_MAX_OBJ_SIZE) {
538 rte_spinlock_lock(&fpadev.lock);
539 res = octeontx_fpa_gpool_alloc(object_size);
542 if (unlikely(res < 0)) {
550 /* get pool handle */
551 gpool_handle = octeontx_fpa_gpool2handle(gpool);
552 if (!octeontx_fpa_handle_valid(gpool_handle)) {
554 goto error_gpool_free;
557 /* Get pool bar address from handle */
558 pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;
560 res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
564 goto error_gpool_free;
567 /* populate AURA fields */
568 res = octeontx_fpapf_aura_attach(gpool);
571 goto error_pool_destroy;
575 memsz = object_size * object_count;
/* Program the VF-side pool address range over the caller's memory. */
577 res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool);
580 goto error_gaura_detach;
584 rte_spinlock_unlock(&fpadev.lock);
586 /* populate AURA registers */
587 fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
588 FPA_VF_VHAURA_CNT(gpool)));
589 fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
590 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
591 fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
592 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
594 octeontx_fpapf_start_count(gpool);
/* Error unwind: detach aura, free VF slot, destroy PF pool, release the
 * gpool reservation, then drop the lock. */
599 (void) octeontx_fpapf_aura_detach(gpool);
601 octeontx_fpavf_free(gpool);
602 octeontx_fpapf_pool_destroy(gpool);
604 octeontx_gpool_free(gpool);
606 rte_spinlock_unlock(&fpadev.lock);
608 return (uintptr_t)NULL;
612 * Destroy a buffer pool.
/*
 * Drains every buffer from the pool behind @handle, verifies the drained
 * buffers form one contiguous series (spacing == block size), disables
 * the pool/aura registers, destroys the PF-side pool and detaches the
 * aura, all under fpadev.lock. Refuses to proceed while buffers are
 * still outstanding (aura count non-zero).
 */
615 octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
617 void **node, **curr, *head = NULL;
624 RTE_SET_USED(node_id);
626 /* Wait for all outstanding writes to be committed */
629 if (unlikely(!octeontx_fpa_handle_valid(handle)))
633 gpool = octeontx_fpa_bufpool_gpool(handle);
635 /* Get pool bar address from handle */
636 pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
638 /* Check for no outstanding buffers */
639 cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
/* NOTE(review): cnt is uint64_t; "%ld" should be "%" PRIu64 — confirm. */
640 FPA_VF_VHAURA_CNT(gpool)));
642 fpavf_log_dbg("buffer exist in pool cnt %ld\n", cnt);
646 rte_spinlock_lock(&fpadev.lock);
648 avail = fpavf_read64((void *)((uintptr_t)pool_bar +
649 FPA_VF_VHPOOL_AVAILABLE(gpool)));
651 /* Prepare to empty the entire POOL */
652 fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
653 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
654 fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
655 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
658 /* Invalidate the POOL */
659 octeontx_gpool_free(gpool);
661 /* Process all buffers in the pool */
664 /* Yank a buffer from the pool */
665 node = (void *)(uintptr_t)
666 fpavf_read64((void *)
667 (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)))
670 fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
675 /* Insert it into an ordered (ascending address) linked list,
 * threading the list pointers through the buffers themselves */
676 for (curr = &head; curr[0] != NULL; curr = curr[0]) {
677 if ((uintptr_t)node <= (uintptr_t)curr[0])
684 /* Verify the linked list to be a perfect series */
/* sz128 << 7 converts the block size back to bytes. */
685 sz = octeontx_fpa_bufpool_block_size(handle) << 7;
686 for (curr = head; curr != NULL && curr[0] != NULL;
688 if (curr == curr[0] ||
689 ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
690 fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
691 gpool, curr, curr[0]);
695 /* Disable pool operation */
696 fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
697 FPA_VF_VHPOOL_START_ADDR(gpool)));
698 fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
699 FPA_VF_VHPOOL_END_ADDR(gpool)));
701 (void)octeontx_fpapf_pool_destroy(gpool);
703 /* Deactivate the AURA */
704 fpavf_write64(0, (void *)((uintptr_t)pool_bar +
705 FPA_VF_VHAURA_CNT_LIMIT(gpool)));
706 fpavf_write64(0, (void *)((uintptr_t)pool_bar +
707 FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
709 ret = octeontx_fpapf_aura_detach(gpool);
711 fpavf_log_err("Failed to dettach gaura %u. error code=%d\n",
716 (void)octeontx_fpavf_free(gpool);
718 rte_spinlock_unlock(&fpadev.lock);
/*
 * One-time initialization of the driver-global fpadev state: init the
 * lock and reset every gpool slot to "unmapped/unused" sentinels
 * (domain_id = ~0, bar0 = NULL, etc.). Guarded by init_once.
 */
723 octeontx_fpavf_setup(void)
726 static bool init_once;
729 rte_spinlock_init(&fpadev.lock);
730 fpadev.total_gpool_cnt = 0;
732 for (i = 0; i < FPA_VF_MAX; i++) {
734 fpadev.pool[i].domain_id = ~0;
735 fpadev.pool[i].stack_ln_ptr = 0;
736 fpadev.pool[i].sz128 = 0;
737 fpadev.pool[i].bar0 = NULL;
738 fpadev.pool[i].pool_stack_base = NULL;
739 fpadev.pool[i].is_inuse = false;
/*
 * Identify the FPA VF behind @bar0: read domain_id/vf_id out of the
 * VHAURA_CNT_THRESHOLD(0) register and the stack line pointer from
 * VHPOOL_THRESHOLD(0), then register the VF in fpadev.pool[vf_id].
 * Rejects vf_id >= FPA_VF_MAX and slots already in use.
 */
746 octeontx_fpavf_identify(void *bar0)
751 uint64_t stack_ln_ptr;
753 val = fpavf_read64((void *)((uintptr_t)bar0 +
754 FPA_VF_VHAURA_CNT_THRESHOLD(0)));
/* Hardware packs domain id at bits [23:8] and vf id at bits [39:24]. */
756 domain_id = (val >> 8) & 0xffff;
757 vf_id = (val >> 24) & 0xffff;
759 stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
760 FPA_VF_VHPOOL_THRESHOLD(0)));
761 if (vf_id >= FPA_VF_MAX) {
762 fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
766 if (fpadev.pool[vf_id].is_inuse) {
767 fpavf_log_err("vf_id %d is_inuse\n", vf_id);
771 fpadev.pool[vf_id].domain_id = domain_id;
772 fpadev.pool[vf_id].vf_id = vf_id;
773 fpadev.pool[vf_id].bar0 = bar0;
774 fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;
780 /* FPAVF pcie device aka mempool probe */
/*
 * PCI probe: primary-process only. Validates BAR0 is mapped, runs the
 * one-time fpadev setup, identifies the VF from its BAR0 registers and
 * bumps the global VF count.
 */
782 fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
786 struct fpavf_res *fpa = NULL;
788 RTE_SET_USED(pci_drv);
791 /* For secondary processes, the primary has done all the work */
792 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
795 if (pci_dev->mem_resource[0].addr == NULL) {
796 fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
799 idreg = pci_dev->mem_resource[0].addr;
801 octeontx_fpavf_setup();
/* identify() returns the registered slot index used below. */
803 res = octeontx_fpavf_identify(idreg);
807 fpa = &fpadev.pool[res];
808 fpadev.total_gpool_cnt++;
811 fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
812 fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
813 fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);
/* PCI ids this driver binds to: the Cavium OcteonTX FPA VF device. */
818 static const struct rte_pci_id pci_fpavf_map[] = {
820 RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
821 PCI_DEVICE_ID_OCTEONTX_FPA_VF)
/* Driver definition: needs BAR mapping and runs with IOVA as VA. */
828 static struct rte_pci_driver pci_fpavf = {
829 .id_table = pci_fpavf_map,
830 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
831 .probe = fpavf_probe,
/* Register with the DPDK PCI bus under the name "octeontx_fpavf". */
834 RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);