uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
uint16_t tag_own; /* Last tag which was written to own channel */
+ uint16_t domain; /* Domain */
rte_spinlock_t lock;
};
}
int
-octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base)
+octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base, uint16_t domain)
{
struct mbox *m = &octeontx_mbox;
if (m->reg != NULL) {
rte_spinlock_init(&m->lock);
m->init_once = 1;
+ /* Record the application domain owning this mbox channel */
+ m->domain = domain;
}
return 0;
}
int
-octeontx_mbox_set_reg(uint8_t *reg)
+octeontx_mbox_set_reg(uint8_t *reg, uint16_t domain)
{
struct mbox *m = &octeontx_mbox;
if (m->ram_mbox_base != NULL) {
rte_spinlock_init(&m->lock);
m->init_once = 1;
+ /* Record the application domain owning this mbox channel */
+ m->domain = domain;
}
return 0;
}
+
+/* Return the application's global domain as recorded during mbox setup. */
+uint16_t
+octeontx_get_global_domain(void)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ return m->domain;
+}
};
int octeontx_mbox_init(void);
-int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base);
-int octeontx_mbox_set_reg(uint8_t *reg);
+void octeontx_set_global_domain(uint16_t global_domain);
+uint16_t octeontx_get_global_domain(void);
+int octeontx_mbox_set_ram_mbox_base(uint8_t *ram_mbox_base, uint16_t domain);
+int octeontx_mbox_set_reg(uint8_t *reg, uint16_t domain);
int octeontx_mbox_send(struct octeontx_mbox_hdr *hdr,
void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
DPDK_20.0 {
global:
+ octeontx_get_global_domain;
octeontx_logtype_mbox;
octeontx_mbox_init;
octeontx_mbox_send;
sdev.total_ssowvfs++;
if (vfid == 0) {
ram_mbox_base = ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
- if (octeontx_mbox_set_ram_mbox_base(ram_mbox_base)) {
+ if (octeontx_mbox_set_ram_mbox_base(ram_mbox_base,
+ res->domain)) {
mbox_log_err("Invalid Failed to set ram mbox base");
return -EINVAL;
}
if (vfid == 0) {
reg = ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
reg += SSO_VHGRP_PF_MBOX(1);
- if (octeontx_mbox_set_reg(reg)) {
+ if (octeontx_mbox_set_reg(reg, res->domain)) {
mbox_log_err("Invalid Failed to set mbox_reg");
return -EINVAL;
}
#include <rte_eal.h>
#include <rte_bus_pci.h>
+#include "../octeontx_logs.h"
+#include "octeontx_io.h"
#include "octeontx_pkivf.h"
+
+struct octeontx_pkivf {
+ uint8_t *bar0;
+ uint8_t status;
+ uint16_t domain;
+ uint16_t vfid;
+};
+
+struct octeontx_pki_vf_ctl_s {
+ struct octeontx_pkivf pki[PKI_VF_MAX];
+};
+
+static struct octeontx_pki_vf_ctl_s pki_vf_ctl;
+
int
octeontx_pki_port_open(int port)
{
+ uint16_t global_domain = octeontx_get_global_domain();
struct octeontx_mbox_hdr hdr;
- int res;
+ mbox_pki_port_t port_type = {
+ .port_type = OCTTX_PORT_TYPE_NET,
+ };
+ int i, res;
+
+ /* Check if atleast one PKI vf is in application domain. */
+ for (i = 0; i < PKI_VF_MAX; i++) {
+ if (pki_vf_ctl.pki[i].domain != global_domain)
+ continue;
+ break;
+ }
+
+ if (i == PKI_VF_MAX)
+ return -ENODEV;
hdr.coproc = OCTEONTX_PKI_COPROC;
hdr.msg = MBOX_PKI_PORT_OPEN;
hdr.vfid = port;
- res = octeontx_mbox_send(&hdr, NULL, 0, NULL, 0);
+ res = octeontx_mbox_send(&hdr, &port_type, sizeof(mbox_pki_port_t),
+ NULL, 0);
if (res < 0)
return -EACCES;
return res;
static int
pkivf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
- RTE_SET_USED(pci_drv);
- RTE_SET_USED(pci_dev);
+ struct octeontx_pkivf *res;
+ static uint8_t vf_cnt;
+ uint16_t domain;
+ uint16_t vfid;
+ uint8_t *bar0;
+ uint64_t val;
+ RTE_SET_USED(pci_drv);
/* For secondary processes, the primary has done all the work */
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ octeontx_log_err("PKI Empty bar[0] %p",
+ pci_dev->mem_resource[0].addr);
+ return -ENODEV;
+ }
+
+ /* BAR0 word 0 encodes the domain (low 16 bits) and vfid (next 16). */
+ bar0 = pci_dev->mem_resource[0].addr;
+ val = octeontx_read64(bar0);
+ domain = val & 0xffff;
+ vfid = (val >> 16) & 0xffff;
+
+ if (unlikely(vfid >= PKI_VF_MAX)) {
+ octeontx_log_err("pki: Invalid vfid %d", vfid);
+ return -EINVAL;
+ }
+
+ /* vfid is bounded above, but vf_cnt grows once per probed device and
+  * indexes the same PKI_VF_MAX-sized array; guard it as well.
+  */
+ if (unlikely(vf_cnt >= PKI_VF_MAX)) {
+ octeontx_log_err("pki: Too many PKI VFs");
+ return -ENODEV;
+ }
+
+ res = &pki_vf_ctl.pki[vf_cnt++];
+ res->vfid = vfid;
+ res->domain = domain;
+ res->bar0 = bar0;
+
+ octeontx_log_dbg("PKI Domain=%d vfid=%d", res->domain, res->vfid);
return 0;
}
MBOX_PKI_PARSE_NOTHING = 0x7f
};
+/* PKI maximum constants */
+#define PKI_VF_MAX (32)
+#define PKI_MAX_PKTLEN (32768)
+
/* Interface types: */
enum {
OCTTX_PORT_TYPE_NET, /* Network interface ports */
uint16_t index;
} mbox_pki_del_qos_t;
-/* PKI maximum constants */
-#define PKI_VF_MAX (1)
-#define PKI_MAX_PKTLEN (32768)
-
/* pki pkind parse mode */
enum {
PKI_PARSE_LA_TO_LG = 0,
};
#define PKO_IOMEM_NULL (struct octeontx_pko_iomem){0, 0, 0}
+#define PKO_VALID 0x1
+#define PKO_INUSE 0x2
struct octeontx_pko_fc_ctl_s {
int64_t buf_cnt;
struct octeontx_pkovf {
uint8_t *bar0;
uint8_t *bar2;
+ uint8_t status;
uint16_t domain;
uint16_t vfid;
};
struct octeontx_pko_vf_ctl_s {
rte_spinlock_t lock;
-
+ uint16_t global_domain;
struct octeontx_pko_iomem fc_iomem;
struct octeontx_pko_fc_ctl_s *fc_ctl;
struct octeontx_pkovf pko[PKO_VF_MAX];
curr.lmtline_va = ctl->pko[dq_vf].bar2;
curr.ioreg_va = (void *)((uintptr_t)ctl->pko[dq_vf].bar0
+ PKO_VF_DQ_OP_SEND((dq), 0));
- curr.fc_status_va = ctl->fc_ctl + dq;
+ curr.fc_status_va = ctl->fc_ctl + dq_num;
octeontx_log_dbg("lmtline=%p ioreg_va=%p fc_status_va=%p",
curr.lmtline_va, curr.ioreg_va,
int
octeontx_pko_vf_count(void)
{
+ uint16_t global_domain = octeontx_get_global_domain();
int vf_cnt;
+ /* NOTE(review): side effect — caches the application's global domain
+  * in pko_vf_ctl so later code can filter VFs by domain.
+  */
+ pko_vf_ctl.global_domain = global_domain;
vf_cnt = 0;
while (pko_vf_ctl.pko[vf_cnt].bar0)
vf_cnt++;
return vf_cnt;
}
+/* Claim the first PKO VF marked VALID and not yet INUSE and return its
+ * vfid; returns SIZE_MAX when no free VF is available.
+ */
+size_t
+octeontx_pko_get_vfid(void)
+{
+ size_t vf_cnt = octeontx_pko_vf_count();
+ size_t vf_idx;
+
+
+ for (vf_idx = 0; vf_idx < vf_cnt; vf_idx++) {
+ if (!(pko_vf_ctl.pko[vf_idx].status & PKO_VALID))
+ continue;
+ if (pko_vf_ctl.pko[vf_idx].status & PKO_INUSE)
+ continue;
+
+ /* Mark the slot taken before handing its vfid to the caller */
+ pko_vf_ctl.pko[vf_idx].status |= PKO_INUSE;
+ return pko_vf_ctl.pko[vf_idx].vfid;
+ }
+
+ return SIZE_MAX;
+}
+
int
octeontx_pko_init_fc(const size_t pko_vf_count)
{
/* Configure Flow-Control feature for all DQs of open VFs */
for (vf_idx = 0; vf_idx < pko_vf_count; vf_idx++) {
- dq_ix = vf_idx * PKO_VF_NUM_DQ;
+ if (pko_vf_ctl.pko[vf_idx].domain != pko_vf_ctl.global_domain)
+ continue;
+ dq_ix = pko_vf_ctl.pko[vf_idx].vfid * PKO_VF_NUM_DQ;
vf_bar0 = pko_vf_ctl.pko[vf_idx].bar0;
reg = (pko_vf_ctl.fc_iomem.iova +
(0x1 << 0); /* ENABLE */
octeontx_write64(reg, vf_bar0 + PKO_VF_DQ_FC_CONFIG);
+ pko_vf_ctl.pko[vf_idx].status = PKO_VALID;
octeontx_log_dbg("PKO: bar0 %p VF_idx %d DQ_FC_CFG=%" PRIx64 "",
vf_bar0, (int)vf_idx, reg);
uint16_t domain;
uint8_t *bar0;
uint8_t *bar2;
+ static uint8_t vf_cnt;
struct octeontx_pkovf *res;
RTE_SET_USED(pci_drv);
return -EINVAL;
}
- res = &pko_vf_ctl.pko[vfid];
+ res = &pko_vf_ctl.pko[vf_cnt++];
res->vfid = vfid;
res->domain = domain;
res->bar0 = bar0;
#ifndef __OCTEONTX_PKO_H__
#define __OCTEONTX_PKO_H__
+#include <octeontx_mbox.h>
+
/* PKO maximum constants */
#define PKO_VF_MAX (32)
#define PKO_VF_NUM_DQ (8)
int octeontx_pko_channel_start(int chanid);
int octeontx_pko_channel_stop(int chanid);
int octeontx_pko_vf_count(void);
+size_t octeontx_pko_get_vfid(void);
int octeontx_pko_init_fc(const size_t pko_vf_count);
void octeontx_pko_fc_free(void);
nic->num_tx_queues = dev->data->nb_tx_queues;
- ret = octeontx_pko_channel_open(nic->port_id * PKO_VF_NUM_DQ,
+ ret = octeontx_pko_channel_open(nic->pko_vfid * PKO_VF_NUM_DQ,
nic->num_tx_queues,
nic->base_ochan);
if (ret) {
RTE_SET_USED(nb_desc);
RTE_SET_USED(socket_id);
- dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx;
+ dq_num = (nic->pko_vfid * PKO_VF_NUM_DQ) + qidx;
/* Socket id check */
if (socket_id != (unsigned int)SOCKET_ID_ANY &&
int socket_id)
{
int res;
+ size_t pko_vfid;
char octtx_name[OCTEONTX_MAX_NAME_LEN];
struct octeontx_nic *nic = NULL;
struct rte_eth_dev *eth_dev = NULL;
goto err;
}
data->dev_private = nic;
+ pko_vfid = octeontx_pko_get_vfid();
+ if (pko_vfid == SIZE_MAX) {
+ octeontx_log_err("failed to get pko vfid");
+ res = -ENODEV;
+ goto err;
+ }
+
+ nic->pko_vfid = pko_vfid;
nic->port_id = port;
nic->evdev = evdev;
uint8_t mcast_mode;
uint16_t num_tx_queues;
uint64_t hwcap;
+ uint8_t pko_vfid;
uint8_t link_up;
uint8_t duplex;
uint8_t speed;