/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium Inc. 2017. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium networks nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <rte_eal.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_pci.h>
#include <rte_spinlock.h>

#include "../octeontx_logs.h"
#include "octeontx_io.h"
#include "octeontx_pkovf.h"
struct octeontx_pko_iomem {
        uint8_t         *va;
        rte_iova_t      iova;
        size_t          size;
};

#define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}
struct octeontx_pko_fc_ctl_s {
        int64_t buf_cnt;
        int64_t padding[(PKO_DQ_FC_STRIDE / 8) - 1];
};
struct octeontx_pkovf {
        uint8_t         *bar0;
        uint8_t         *bar2;
        uint16_t        domain;
        uint16_t        vfid;
};
struct octeontx_pko_vf_ctl_s {
        rte_spinlock_t lock;

        struct octeontx_pko_iomem fc_iomem;
        struct octeontx_pko_fc_ctl_s *fc_ctl;
        struct octeontx_pkovf pko[PKO_VF_MAX];
        struct {
                uint64_t chanid;
        } dq_map[PKO_VF_MAX * PKO_VF_NUM_DQ];
};

static struct octeontx_pko_vf_ctl_s pko_vf_ctl;
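/*
 * Note on dq_map (clarifying comment, derived from the claim/assign/free
 * logic below): each slot stores the bitwise complement of the owning
 * channel id, so a value of 0 (written at setup time and again on free,
 * i.e. ~null_chanid with null_chanid == ~0ull) marks the DQ as unclaimed.
 */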
static void *
octeontx_pko_dq_vf_bar0(uint16_t txq)
{
        int vf_ix;

        vf_ix = txq / PKO_VF_NUM_DQ;
        return pko_vf_ctl.pko[vf_ix].bar0;
}
static int
octeontx_pko_dq_gdq(uint16_t txq)
{
        return txq % PKO_VF_NUM_DQ;
}
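/*
 * Worked example (illustrative only, assuming PKO_VF_NUM_DQ == 8): a global
 * tx queue index txq = 19 resolves to VF 19 / 8 = 2, so the DQ lives behind
 * pko_vf_ctl.pko[2].bar0, and to GDQ 19 % 8 = 3, i.e. the fourth descriptor
 * queue inside that VF.
 */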
/**
 * Open a PKO DQ.
 */
static inline
int octeontx_pko_dq_open(uint16_t txq)
{
        unsigned int reg_off;
        uint8_t *vf_bar0;
        uint64_t rtn;
        int gdq;

        vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
        gdq = octeontx_pko_dq_gdq(txq);

        if (unlikely(gdq < 0 || vf_bar0 == NULL))
                return -EINVAL;

        /* Set the initial flow-control credits for this DQ */
        *(volatile int64_t *)(pko_vf_ctl.fc_ctl + txq) =
                PKO_DQ_FC_DEPTH_PAGES - PKO_DQ_FC_SKID;

        rte_wmb();
        octeontx_write64(PKO_DQ_FC_DEPTH_PAGES,
                         vf_bar0 + PKO_VF_DQ_FC_STATUS(gdq));

        /* Set the register to return descriptor (packet) count as DEPTH */
        /* KIND=1, NCB_QUERY_RSP=0 */
        octeontx_write64(1ull << PKO_DQ_KIND_BIT,
                         vf_bar0 + PKO_VF_DQ_WM_CTL(gdq));
        reg_off = PKO_VF_DQ_OP_OPEN(gdq);

        rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

        /* PKO_DQOP_E::OPEN */
        if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x1)
                return -EIO;

        switch (rtn >> PKO_DQ_STATUS_BIT) {
        case 0xC:       /* DQALREADYCREATED */
        case 0x0:       /* PASS */
                break;
        default:
                return -EIO;
        }

        /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
        octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

        return rtn & ((1ull << PKO_DQ_OP_BIT) - 1);
}
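/*
 * Response layout used by the open/close LDADD commands above, as decoded
 * in this file: the two bits starting at PKO_DQ_OP_BIT carry the operation
 * that was executed (0x1 OPEN, 0x2 CLOSE), the bits from PKO_DQ_STATUS_BIT
 * upward carry the status code (0x0 PASS, 0xC DQALREADYCREATED,
 * 0xD DQNOTCREATED), and the bits below PKO_DQ_OP_BIT carry the current
 * queue DEPTH, which is what the functions return on success.
 */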
/**
 * Close a PKO DQ.
 * Flush all packets pending.
 */
static inline
int octeontx_pko_dq_close(uint16_t txq)
{
        unsigned int reg_off;
        uint8_t *vf_bar0;
        uint64_t rtn;
        int res;

        vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
        res = octeontx_pko_dq_gdq(txq);

        if (unlikely(res < 0 || vf_bar0 == NULL))
                return -EINVAL;

        reg_off = PKO_VF_DQ_OP_CLOSE(res);

        rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

        /* PKO_DQOP_E::CLOSE */
        if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x2)
                return -EIO;

        switch (rtn >> PKO_DQ_STATUS_BIT) {
        case 0xD:       /* DQNOTCREATED */
        case 0x0:       /* PASS */
                break;
        default:
                return -EIO;
        }

        res = rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */
        return res;
}
/* Flush all packets pending on a DQ */
static inline
int octeontx_pko_dq_drain(uint16_t txq)
{
        uint64_t reg;
        uint8_t *vf_bar0;
        int res, timo = PKO_DQ_DRAIN_TO;

        vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
        res = octeontx_pko_dq_gdq(txq);

        /* DRAIN=1, DRAIN_NULL_LINK=0, SW_XOFF=1 */
        octeontx_write64(0x3, vf_bar0 + PKO_VF_DQ_SW_XOFF(res));
        /* Wait until buffers leave DQs */
        reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(res));
        while (reg && timo > 0) {
                rte_delay_us(100);
                timo--;
                reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(res));
        }
        /* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
        octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(res));

        return reg;
}
static inline int
octeontx_pko_dq_range_lookup(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
                             unsigned int dq_num, unsigned int dq_from)
{
        unsigned int dq, dq_cnt;
        unsigned int dq_base;

        dq_cnt = 0;
        dq = dq_from;
        while (dq < RTE_DIM(ctl->dq_map)) {
                dq_base = dq;
                dq_cnt = 0;
                while (ctl->dq_map[dq].chanid == ~chanid &&
                       dq < RTE_DIM(ctl->dq_map)) {
                        dq_cnt++;
                        if (dq_cnt == dq_num)
                                return dq_base;
                        dq++;
                }
                dq++;
        }

        return -1;
}
static inline void
octeontx_pko_dq_range_assign(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
                             unsigned int dq_base, unsigned int dq_num)
{
        unsigned int dq, dq_cnt;

        dq_cnt = 0;
        while (dq_cnt < dq_num) {
                dq = dq_base + dq_cnt;

                octeontx_log_dbg("DQ# %u assigned to CHAN# %" PRIx64 "", dq,
                                 chanid);

                ctl->dq_map[dq].chanid = ~chanid;
                dq_cnt++;
        }
}
static inline int
octeontx_pko_dq_claim(struct octeontx_pko_vf_ctl_s *ctl, unsigned int dq_base,
                      unsigned int dq_num, uint64_t chanid)
{
        const uint64_t null_chanid = ~0ull;
        int dq;

        rte_spinlock_lock(&ctl->lock);

        dq = octeontx_pko_dq_range_lookup(ctl, null_chanid, dq_num, dq_base);
        if (dq < 0 || (unsigned int)dq != dq_base) {
                rte_spinlock_unlock(&ctl->lock);
                return -1;
        }
        octeontx_pko_dq_range_assign(ctl, chanid, dq_base, dq_num);

        rte_spinlock_unlock(&ctl->lock);

        return 0;
}
static inline int
octeontx_pko_dq_free(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
        const uint64_t null_chanid = ~0ull;
        unsigned int dq = 0, dq_cnt = 0;

        rte_spinlock_lock(&ctl->lock);
        while (dq < RTE_DIM(ctl->dq_map)) {
                if (ctl->dq_map[dq].chanid == ~chanid) {
                        ctl->dq_map[dq].chanid = ~null_chanid;
                        dq_cnt++;
                }
                dq++;
        }
        rte_spinlock_unlock(&ctl->lock);

        return dq_cnt > 0 ? 0 : -EINVAL;
}
int
octeontx_pko_channel_open(int dq_base, int dq_num, int chanid)
{
        struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
        int res;

        res = octeontx_pko_dq_claim(ctl, dq_base, dq_num, chanid);
        if (res < 0)
                return -1;

        return 0;
}
int
octeontx_pko_channel_close(int chanid)
{
        struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
        int res;

        res = octeontx_pko_dq_free(ctl, chanid);
        if (res < 0)
                return -1;

        return 0;
}
static inline int
octeontx_pko_chan_start(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
        unsigned int dq_vf;
        unsigned int dq, dq_cnt;

        dq_cnt = 0;
        dq = 0;
        while (dq < RTE_DIM(ctl->dq_map)) {
                dq_vf = dq / PKO_VF_NUM_DQ;

                if (!ctl->pko[dq_vf].bar0) {
                        dq += PKO_VF_NUM_DQ;
                        continue;
                }

                if (ctl->dq_map[dq].chanid != ~chanid) {
                        dq++;
                        continue;
                }

                if (octeontx_pko_dq_open(dq) < 0)
                        break;

                dq_cnt++;
                dq++;
        }

        return dq_cnt;
}
int
octeontx_pko_channel_start(int chanid)
{
        struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
        int dq_cnt;

        dq_cnt = octeontx_pko_chan_start(ctl, chanid);
        if (dq_cnt < 0)
                return -1;

        return dq_cnt;
}
static inline int
octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
        unsigned int dq, dq_cnt, dq_vf;
        int res;

        dq_cnt = 0;
        dq = 0;
        while (dq < RTE_DIM(ctl->dq_map)) {
                dq_vf = dq / PKO_VF_NUM_DQ;

                if (!ctl->pko[dq_vf].bar0) {
                        dq += PKO_VF_NUM_DQ;
                        continue;
                }

                if (ctl->dq_map[dq].chanid != ~chanid) {
                        dq++;
                        continue;
                }

                res = octeontx_pko_dq_drain(dq);
                if (res > 0)
                        octeontx_log_err("draining DQ%d, buffers left: %x",
                                         dq, res);

                res = octeontx_pko_dq_close(dq);
                if (res < 0)
                        octeontx_log_err("closing DQ%d failed\n", dq);

                dq_cnt++;
                dq++;
        }

        return dq_cnt;
}
int
octeontx_pko_channel_stop(int chanid)
{
        struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;

        octeontx_pko_chan_stop(ctl, chanid);
        return 0;
}
static inline int
octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
                           void *out, size_t out_elem_size,
                           size_t dq_num, octeontx_pko_dq_getter_t getter)
{
        octeontx_dq_t curr;
        unsigned int dq_vf;
        unsigned int dq;

        RTE_SET_USED(out_elem_size);
        memset(&curr, 0, sizeof(octeontx_dq_t));

        dq_vf = dq_num / PKO_VF_NUM_DQ;
        dq = dq_num % PKO_VF_NUM_DQ;

        if (!ctl->pko[dq_vf].bar0)
                return -EINVAL;

        if (ctl->dq_map[dq_num].chanid != ~chanid)
                return -EINVAL;

        uint8_t *iter = (uint8_t *)out;
        curr.lmtline_va = ctl->pko[dq_vf].bar2;
        curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
                + PKO_VF_DQ_OP_SEND((dq), 0));
        curr.fc_status_va = ctl->fc_ctl + dq;

        octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
                         curr.lmtline_va, curr.ioreg_va,
                         curr.fc_status_va);

        getter(&curr, (void *)iter);
        return 0;
}
int
octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
                               size_t dq_num, octeontx_pko_dq_getter_t getter)
{
        struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
        int dq_cnt;

        dq_cnt = octeontx_pko_channel_query(ctl, chanid, out, out_elem_size,
                                            dq_num, getter);
        if (dq_cnt < 0)
                return -1;

        return dq_cnt;
}
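/*
 * Usage sketch (illustrative only, not part of the driver): a hypothetical
 * caller, e.g. an ethdev TX queue setup routine, would typically claim the
 * DQs for a channel, bring them up, fetch the per-DQ register addresses via
 * a getter callback, and later tear everything down. The getter below and
 * the base/count/channel values are assumptions made for this example.
 *
 *      static void dq_getter(octeontx_dq_t *dq, void *out)
 *      {
 *              memcpy(out, dq, sizeof(*dq));
 *      }
 *
 *      octeontx_dq_t dq_info;
 *
 *      if (octeontx_pko_channel_open(0, 1, chanid) < 0)
 *              return -ENODEV;
 *      if (octeontx_pko_channel_start(chanid) < 1)
 *              return -EIO;
 *      octeontx_pko_channel_query_dqs(chanid, &dq_info, sizeof(dq_info),
 *                                     0, dq_getter);
 *      ...
 *      octeontx_pko_channel_stop(chanid);
 *      octeontx_pko_channel_close(chanid);
 */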
int
octeontx_pko_vf_count(void)
{
        int vf_cnt;

        vf_cnt = 0;
        while (pko_vf_ctl.pko[vf_cnt].bar0)
                vf_cnt++;

        return vf_cnt;
}
int
octeontx_pko_init_fc(const size_t pko_vf_count)
{
        int dq_ix;
        uint64_t reg;
        uint8_t *vf_bar0;
        size_t vf_idx;
        size_t fc_mem_size;

        fc_mem_size = sizeof(struct octeontx_pko_fc_ctl_s) *
                        pko_vf_count * PKO_VF_NUM_DQ;

        pko_vf_ctl.fc_iomem.va = rte_malloc(NULL, fc_mem_size, 128);
        if (unlikely(!pko_vf_ctl.fc_iomem.va)) {
                octeontx_log_err("fc_iomem: not enough memory");
                return -ENOMEM;
        }

        pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2phy((void *)
                                                pko_vf_ctl.fc_iomem.va);
        pko_vf_ctl.fc_iomem.size = fc_mem_size;

        pko_vf_ctl.fc_ctl =
                (struct octeontx_pko_fc_ctl_s *)pko_vf_ctl.fc_iomem.va;

        /* Configure Flow-Control feature for all DQs of open VFs */
        for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
                dq_ix = vf_idx * PKO_VF_NUM_DQ;

                vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;

                reg = (pko_vf_ctl.fc_iomem.iova +
                       (sizeof(struct octeontx_pko_fc_ctl_s) * dq_ix)) & ~0x7F;
                reg |=                  /* BASE */
                    (0x2 << 3) |        /* HYST_BITS */
                    (((PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16) ? 1 : 0) << 2) |
                    (0x1 << 0);         /* ENABLE */

                octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);

                octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "",
                                 vf_bar0, (int)vf_idx, reg);
        }

        return 0;
}
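/*
 * Worked example for the DQ_FC_CONFIG value built above (illustrative, with
 * an assumed fc_iomem.iova of 0x1f2000000 and vf_idx 0, hence dq_ix 0):
 * BASE = 0x1f2000000 & ~0x7F = 0x1f2000000, HYST_BITS = 0x2 in bits [4:3],
 * the STRIDE bit [2] stays 0 unless PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16,
 * and ENABLE bit [0] = 1, giving reg = 0x1f2000011.
 */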
void
octeontx_pko_fc_free(void)
{
        rte_free(pko_vf_ctl.fc_iomem.va);
}
static void
octeontx_pkovf_setup(void)
{
        static bool init_once;

        if (!init_once) {
                unsigned int i;

                rte_spinlock_init(&pko_vf_ctl.lock);

                pko_vf_ctl.fc_iomem = PKO_IOMEM_NULL;
                pko_vf_ctl.fc_ctl = NULL;

                for (i = 0; i < PKO_VF_MAX; i++) {
                        pko_vf_ctl.pko[i].bar0 = NULL;
                        pko_vf_ctl.pko[i].bar2 = NULL;
                        pko_vf_ctl.pko[i].domain = ~(uint16_t)0;
                        pko_vf_ctl.pko[i].vfid = ~(uint16_t)0;
                }

                for (i = 0; i < (PKO_VF_MAX * PKO_VF_NUM_DQ); i++)
                        pko_vf_ctl.dq_map[i].chanid = 0;

                init_once = true;
        }
}
/* PKOVF pcie device */
static int
pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        uint64_t val;
        uint16_t vfid;
        uint16_t domain;
        uint8_t *bar0;
        uint8_t *bar2;
        struct octeontx_pkovf *res;

        RTE_SET_USED(pci_drv);

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        if (pci_dev->mem_resource[0].addr == NULL ||
            pci_dev->mem_resource[2].addr == NULL) {
                octeontx_log_err("Empty bars %p %p",
                                 pci_dev->mem_resource[0].addr,
                                 pci_dev->mem_resource[2].addr);
                return -ENODEV;
        }
        bar0 = pci_dev->mem_resource[0].addr;
        bar2 = pci_dev->mem_resource[2].addr;

        octeontx_pkovf_setup();

        /* get vfid and domain */
        val = octeontx_read64(bar0 + PKO_VF_DQ_FC_CONFIG);
        domain = (val >> 7) & 0xffff;
        vfid = (val >> 23) & 0xffff;

        if (unlikely(vfid >= PKO_VF_MAX)) {
                octeontx_log_err("pko: Invalid vfid %d", vfid);
                return -EINVAL;
        }

        res = &pko_vf_ctl.pko[vfid];
        res->vfid = vfid;
        res->domain = domain;
        res->bar0 = bar0;
        res->bar2 = bar2;

        octeontx_log_dbg("Domain=%d group=%d", res->domain, res->vfid);
        return 0;
}
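/*
 * Decode example (illustrative, register value assumed): if a read of
 * PKO_VF_DQ_FC_CONFIG returns val = 0x800080, then
 * domain = (val >> 7) & 0xffff = 0x1 and vfid = (val >> 23) & 0xffff = 0x1,
 * i.e. this BAR belongs to VF 1 of application domain 1.
 */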
#define PCI_VENDOR_ID_CAVIUM               0x177D
#define PCI_DEVICE_ID_OCTEONTX_PKO_VF      0xA049

static const struct rte_pci_id pci_pkovf_map[] = {
        {
                RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
                               PCI_DEVICE_ID_OCTEONTX_PKO_VF)
        },
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver pci_pkovf = {
        .id_table = pci_pkovf_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = pkovf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_pkovf, pci_pkovf);