/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <rte_eal.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_pci.h>
#include <rte_spinlock.h>

#include "../octeontx_logs.h"
#include "octeontx_io.h"
#include "octeontx_pkovf.h"
struct octeontx_pko_iomem {
	uint8_t		*va;
	rte_iova_t	iova;
	size_t		size;
};

#define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}
struct octeontx_pko_fc_ctl_s {
	int64_t buf_cnt;
	int64_t padding[(PKO_DQ_FC_STRIDE / 8) - 1];
};
struct octeontx_pkovf {
	uint8_t		*bar0;
	uint8_t		*bar2;
	uint16_t	domain;
	uint16_t	vfid;
};
struct octeontx_pko_vf_ctl_s {
	rte_spinlock_t lock;

	struct octeontx_pko_iomem fc_iomem;
	struct octeontx_pko_fc_ctl_s *fc_ctl;
	struct octeontx_pkovf pko[PKO_VF_MAX];
	struct {
		uint64_t chanid;
	} dq_map[PKO_VF_MAX * PKO_VF_NUM_DQ];
};

static struct octeontx_pko_vf_ctl_s pko_vf_ctl;
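/*
 * dq_map encoding: a claimed DQ stores the bitwise complement of its
 * channel id (~chanid). The complement of the null channel id (~0ull)
 * is 0, so zero-initialized entries double as the "unclaimed" marker,
 * and every lookup below compares against ~chanid rather than chanid.
 */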
static void *
octeontx_pko_dq_vf_bar0(uint16_t txq)
{
	int vf_ix;

	vf_ix = txq / PKO_VF_NUM_DQ;
	return pko_vf_ctl.pko[vf_ix].bar0;
}

static int
octeontx_pko_dq_gdq(uint16_t txq)
{
	return txq % PKO_VF_NUM_DQ;
}
/**
 * Open a PKO DQ.
 */
static inline
int octeontx_pko_dq_open(uint16_t txq)
{
	unsigned int reg_off;
	uint8_t *vf_bar0;
	uint64_t rtn;
	int gdq;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	gdq = octeontx_pko_dq_gdq(txq);

	if (unlikely(gdq < 0 || vf_bar0 == NULL))
		return -EINVAL;

	/* Seed the per-DQ flow-control credit counter */
	*(volatile int64_t *)(pko_vf_ctl.fc_ctl + txq) =
		PKO_DQ_FC_DEPTH_PAGES - PKO_DQ_FC_SKID;

	rte_wmb();

	octeontx_write64(PKO_DQ_FC_DEPTH_PAGES,
			 vf_bar0 + PKO_VF_DQ_FC_STATUS(gdq));

	/* Set the register to return descriptor (packet) count as DEPTH */
	/* KIND=1, NCB_QUERY_RSP=0 */
	octeontx_write64(1ull << PKO_DQ_KIND_BIT,
			 vf_bar0 + PKO_VF_DQ_WM_CTL(gdq));
	reg_off = PKO_VF_DQ_OP_OPEN(gdq);

	rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

	/* PKO_DQOP_E::OPEN */
	if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x1)
		return -EIO;

	switch (rtn >> PKO_DQ_STATUS_BIT) {
	case 0xC:	/* DQALREADYCREATED */
	case 0x0:	/* PASS */
		break;
	default:
		return -EIO;
	}

	/* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
	octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

	/* Low bits carry the DQ DEPTH at open time */
	return rtn & ((1ull << PKO_DQ_OP_BIT) - 1);
}
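/*
 * Note: OPEN and CLOSE are both issued through octeontx_reg_ldadd_u64()
 * with an increment of zero: the atomic load-and-add acts as a plain read
 * of the PKO_VF_DQ_OP_* register, whose side effect is to execute the DQ
 * operation selected by the register offset. The returned 64-bit word
 * packs the OP, STATUS and DEPTH fields checked above and below.
 */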
/**
 * Close a PKO DQ.
 * Flush all packets pending.
 */
static inline
int octeontx_pko_dq_close(uint16_t txq)
{
	unsigned int reg_off;
	uint8_t *vf_bar0;
	uint64_t rtn;
	int res;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	res = octeontx_pko_dq_gdq(txq);

	if (unlikely(res < 0 || vf_bar0 == NULL))
		return -EINVAL;

	reg_off = PKO_VF_DQ_OP_CLOSE(res);

	rtn = octeontx_reg_ldadd_u64(vf_bar0 + reg_off, 0);

	/* PKO_DQOP_E::CLOSE */
	if (((rtn >> PKO_DQ_OP_BIT) & 0x3) != 0x2)
		return -EIO;

	switch (rtn >> PKO_DQ_STATUS_BIT) {
	case 0xD:	/* DQNOTCREATED */
	case 0x0:	/* PASS */
		break;
	default:
		return -EIO;
	}

	res = rtn & ((1ull << PKO_DQ_OP_BIT) - 1); /* DEPTH */
	return res;
}
/* Flush all packets pending on a DQ */
static inline
int octeontx_pko_dq_drain(uint16_t txq)
{
	uint64_t reg;
	uint8_t *vf_bar0;
	int gdq;
	int timo = PKO_DQ_DRAIN_TO;

	vf_bar0 = octeontx_pko_dq_vf_bar0(txq);
	gdq = octeontx_pko_dq_gdq(txq);

	/* DRAIN=1, DRAIN_NULL_LINK=0, SW_XOFF=1 */
	octeontx_write64(0x3, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));
	/* Wait until buffers leave DQs */
	reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
	while (reg && timo > 0) {
		rte_delay_us(100);
		timo--;
		reg = octeontx_read64(vf_bar0 + PKO_VF_DQ_WM_CNT(gdq));
	}
	/* DRAIN=0, DRAIN_NULL_LINK=0, SW_XOFF=0 */
	octeontx_write64(0, vf_bar0 + PKO_VF_DQ_SW_XOFF(gdq));

	/* Non-zero: the timeout expired with buffers still queued */
	return reg;
}
static inline int
octeontx_pko_dq_range_lookup(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
			     unsigned int dq_num, unsigned int dq_from)
{
	unsigned int dq, dq_cnt;
	unsigned int dq_base;

	dq = dq_from;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_base = dq;
		dq_cnt = 0;
		/* Bounds check first to avoid reading past dq_map */
		while (dq < RTE_DIM(ctl->dq_map) &&
		       ctl->dq_map[dq].chanid == ~chanid) {
			dq_cnt++;
			if (dq_cnt == dq_num)
				return dq_base;
			dq++;
		}
		dq++;
	}
	return -1;
}
static inline void
octeontx_pko_dq_range_assign(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
			     unsigned int dq_base, unsigned int dq_num)
{
	unsigned int dq, dq_cnt;

	dq_cnt = 0;
	while (dq_cnt < dq_num) {
		dq = dq_base + dq_cnt;

		octeontx_log_dbg("DQ# %u assigned to CHAN# %" PRIx64 "", dq,
				 chanid);

		ctl->dq_map[dq].chanid = ~chanid;
		dq_cnt++;
	}
}
static inline int
octeontx_pko_dq_claim(struct octeontx_pko_vf_ctl_s *ctl, unsigned int dq_base,
		      unsigned int dq_num, uint64_t chanid)
{
	const uint64_t null_chanid = ~0ull;
	int dq;

	rte_spinlock_lock(&ctl->lock);

	dq = octeontx_pko_dq_range_lookup(ctl, null_chanid, dq_num, dq_base);
	if (dq < 0 || (unsigned int)dq != dq_base) {
		rte_spinlock_unlock(&ctl->lock);
		return -1;
	}
	octeontx_pko_dq_range_assign(ctl, chanid, dq_base, dq_num);

	rte_spinlock_unlock(&ctl->lock);

	return 0;
}
static inline int
octeontx_pko_dq_free(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	const uint64_t null_chanid = ~0ull;
	unsigned int dq = 0, dq_cnt = 0;

	rte_spinlock_lock(&ctl->lock);
	while (dq < RTE_DIM(ctl->dq_map)) {
		if (ctl->dq_map[dq].chanid == ~chanid) {
			ctl->dq_map[dq].chanid = ~null_chanid;
			dq_cnt++;
		}
		dq++;
	}
	rte_spinlock_unlock(&ctl->lock);

	return dq_cnt > 0 ? 0 : -EINVAL;
}
int
octeontx_pko_channel_open(int dq_base, int dq_num, int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int res;

	res = octeontx_pko_dq_claim(ctl, dq_base, dq_num, chanid);
	return res < 0 ? -1 : 0;
}
int
octeontx_pko_channel_close(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int res;

	res = octeontx_pko_dq_free(ctl, chanid);
	return res < 0 ? -1 : 0;
}
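/*
 * Typical channel lifecycle (a sketch; BASE_DQ, NUM_DQ and CHANID are
 * placeholder values, not constants defined by this driver):
 *
 *	octeontx_pko_channel_open(BASE_DQ, NUM_DQ, CHANID);
 *	octeontx_pko_channel_start(CHANID);
 *	...transmit on the claimed DQs...
 *	octeontx_pko_channel_stop(CHANID);
 *	octeontx_pko_channel_close(CHANID);
 */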
static inline int
octeontx_pko_chan_start(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	unsigned int dq_vf;
	unsigned int dq, dq_cnt;

	dq_cnt = 0;
	dq = 0;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_vf = dq / PKO_VF_NUM_DQ;

		/* Skip whole VFs whose BARs were never mapped */
		if (!ctl->pko[dq_vf].bar0) {
			dq += PKO_VF_NUM_DQ;
			continue;
		}

		if (ctl->dq_map[dq].chanid != ~chanid) {
			dq++;
			continue;
		}

		if (octeontx_pko_dq_open(dq) < 0)
			break;

		dq_cnt++;
		dq++;
	}

	return dq_cnt;
}
int
octeontx_pko_channel_start(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int dq_cnt;

	dq_cnt = octeontx_pko_chan_start(ctl, chanid);
	return dq_cnt < 0 ? -1 : dq_cnt;
}
static inline int
octeontx_pko_chan_stop(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid)
{
	unsigned int dq, dq_cnt, dq_vf;
	int res;

	dq_cnt = 0;
	dq = 0;
	while (dq < RTE_DIM(ctl->dq_map)) {
		dq_vf = dq / PKO_VF_NUM_DQ;

		if (!ctl->pko[dq_vf].bar0) {
			dq += PKO_VF_NUM_DQ;
			continue;
		}

		if (ctl->dq_map[dq].chanid != ~chanid) {
			dq++;
			continue;
		}

		res = octeontx_pko_dq_drain(dq);
		if (res > 0)
			octeontx_log_err("draining DQ%d, buffers left: %x",
					 dq, res);

		res = octeontx_pko_dq_close(dq);
		if (res < 0)
			octeontx_log_err("closing DQ%d failed", dq);

		dq_cnt++;
		dq++;
	}
	return dq_cnt;
}
int
octeontx_pko_channel_stop(int chanid)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;

	octeontx_pko_chan_stop(ctl, chanid);
	return 0;
}
static inline int
octeontx_pko_channel_query(struct octeontx_pko_vf_ctl_s *ctl, uint64_t chanid,
			   void *out, size_t out_elem_size,
			   size_t dq_num, octeontx_pko_dq_getter_t getter)
{
	octeontx_dq_t curr;
	unsigned int dq_vf;
	unsigned int dq;

	RTE_SET_USED(out_elem_size);
	memset(&curr, 0, sizeof(octeontx_dq_t));

	dq_vf = dq_num / PKO_VF_NUM_DQ;
	dq = dq_num % PKO_VF_NUM_DQ;

	if (!ctl->pko[dq_vf].bar0)
		return -EINVAL;

	if (ctl->dq_map[dq_num].chanid != ~chanid)
		return -EINVAL;

	uint8_t *iter = (uint8_t *)out;
	curr.lmtline_va = ctl->pko[dq_vf].bar2;
	curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
				 + PKO_VF_DQ_OP_SEND((dq), 0));
	curr.fc_status_va = ctl->fc_ctl + dq;

	octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
			 curr.lmtline_va, curr.ioreg_va,
			 curr.fc_status_va);

	getter(&curr, (void *)iter);
	return 0;
}
int
octeontx_pko_channel_query_dqs(int chanid, void *out, size_t out_elem_size,
			       size_t dq_num, octeontx_pko_dq_getter_t getter)
{
	struct octeontx_pko_vf_ctl_s *ctl = &pko_vf_ctl;
	int dq_cnt;

	dq_cnt = octeontx_pko_channel_query(ctl, chanid, out, out_elem_size,
					    dq_num, getter);
	return dq_cnt < 0 ? -1 : dq_cnt;
}
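/*
 * Minimal getter sketch: octeontx_pko_channel_query_dqs() hands the
 * filled-in octeontx_dq_t plus the caller's output cursor to the getter,
 * so a getter that snapshots the descriptor could look like this
 * (hypothetical helper, not part of this driver):
 *
 *	static void
 *	pko_dq_snapshot(octeontx_dq_t *dq, void *out)
 *	{
 *		memcpy(out, dq, sizeof(octeontx_dq_t));
 *	}
 */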
int
octeontx_pko_vf_count(void)
{
	int vf_cnt;

	vf_cnt = 0;
	while (pko_vf_ctl.pko[vf_cnt].bar0)
		vf_cnt++;

	return vf_cnt;
}
int
octeontx_pko_init_fc(const size_t pko_vf_count)
{
	int dq_ix;
	uint64_t reg;
	uint8_t *vf_bar0;
	size_t vf_idx;
	size_t fc_mem_size;

	fc_mem_size = sizeof(struct octeontx_pko_fc_ctl_s) *
			pko_vf_count * PKO_VF_NUM_DQ;

	pko_vf_ctl.fc_iomem.va = rte_malloc(NULL, fc_mem_size, 128);
	if (unlikely(!pko_vf_ctl.fc_iomem.va)) {
		octeontx_log_err("fc_iomem: not enough memory");
		return -ENOMEM;
	}

	pko_vf_ctl.fc_iomem.iova = rte_malloc_virt2iova((void *)
						pko_vf_ctl.fc_iomem.va);
	pko_vf_ctl.fc_iomem.size = fc_mem_size;

	pko_vf_ctl.fc_ctl =
		(struct octeontx_pko_fc_ctl_s *)pko_vf_ctl.fc_iomem.va;

	/* Configure Flow-Control feature for all DQs of open VFs */
	for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
		dq_ix = vf_idx * PKO_VF_NUM_DQ;

		vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;

		reg = (pko_vf_ctl.fc_iomem.iova +
			(sizeof(struct octeontx_pko_fc_ctl_s) * dq_ix)) & ~0x7F;
		reg |=				/* BASE */
		    (0x2 << 3) |		/* HYST_BITS */
		    (((PKO_DQ_FC_STRIDE == PKO_DQ_FC_STRIDE_16) ? 1 : 0) << 2) |
		    (0x1 << 0);			/* ENABLE */

		octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);

		octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "",
				 vf_bar0, (int)vf_idx, reg);
	}

	return 0;
}
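/*
 * Layout note (inferred from the register programming above): the hardware
 * updates each DQ's credit count at BASE + dq * PKO_DQ_FC_STRIDE, i.e. one
 * octeontx_pko_fc_ctl_s slot per DQ within the 128-byte-aligned fc_iomem
 * region, which lets the transmit path poll flow-control state from plain
 * memory via the fc_status_va pointer instead of a device register read.
 */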
void
octeontx_pko_fc_free(void)
{
	rte_free(pko_vf_ctl.fc_iomem.va);
}
static void
octeontx_pkovf_setup(void)
{
	static bool init_once;
	unsigned int i;

	if (init_once)
		return;

	rte_spinlock_init(&pko_vf_ctl.lock);

	pko_vf_ctl.fc_iomem = PKO_IOMEM_NULL;
	pko_vf_ctl.fc_ctl = NULL;

	for (i = 0; i < PKO_VF_MAX; i++) {
		pko_vf_ctl.pko[i].bar0 = NULL;
		pko_vf_ctl.pko[i].bar2 = NULL;
		pko_vf_ctl.pko[i].domain = ~(uint16_t)0;
		pko_vf_ctl.pko[i].vfid = ~(uint16_t)0;
	}

	for (i = 0; i < (PKO_VF_MAX * PKO_VF_NUM_DQ); i++)
		pko_vf_ctl.dq_map[i].chanid = 0;

	init_once = true;
}
/* PKOVF PCIe device */
static int
pkovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
	uint64_t val;
	uint16_t vfid;
	uint16_t domain;
	uint8_t *bar0;
	uint8_t *bar2;
	struct octeontx_pkovf *res;

	RTE_SET_USED(pci_drv);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (pci_dev->mem_resource[0].addr == NULL ||
	    pci_dev->mem_resource[2].addr == NULL) {
		octeontx_log_err("Empty bars %p %p",
				 pci_dev->mem_resource[0].addr,
				 pci_dev->mem_resource[2].addr);
		return -ENODEV;
	}
	bar0 = pci_dev->mem_resource[0].addr;
	bar2 = pci_dev->mem_resource[2].addr;

	octeontx_pkovf_setup();

	/* Get vfid and domain from the DQ flow-control config register */
	val = octeontx_read64(bar0 + PKO_VF_DQ_FC_CONFIG);
	domain = (val >> 7) & 0xffff;
	vfid = (val >> 23) & 0xffff;

	if (unlikely(vfid >= PKO_VF_MAX)) {
		octeontx_log_err("pko: Invalid vfid %d", vfid);
		return -EINVAL;
	}

	res = &pko_vf_ctl.pko[vfid];
	res->vfid = vfid;
	res->domain = domain;
	res->bar0 = bar0;
	res->bar2 = bar2;

	octeontx_log_dbg("Domain=%d group=%d", res->domain, res->vfid);
	return 0;
}
#define PCI_VENDOR_ID_CAVIUM			0x177D
#define PCI_DEVICE_ID_OCTEONTX_PKO_VF		0xA049

static const struct rte_pci_id pci_pkovf_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVICE_ID_OCTEONTX_PKO_VF)
	},
	{
		.vendor_id = 0,
	},
};

static struct rte_pci_driver pci_pkovf = {
	.id_table = pci_pkovf_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = pkovf_probe,
};

RTE_PMD_REGISTER_PCI(octeontx_pkovf, pci_pkovf);