The ``net_octeontx2`` PMD supports the multicast MAC filtering feature only on
physical function devices.
+SDP interface support
+~~~~~~~~~~~~~~~~~~~~~
+OCTEON TX2 SDP interface support is limited to the PF device; VFs are not supported.
+
Debugging Options
-----------------
+---+-----+--------------------------------------------------------------+
| 8 | DPI | rte_rawdev |
+---+-----+--------------------------------------------------------------+
+| 9 | SDP | rte_ethdev |
++---+-----+--------------------------------------------------------------+
PF0 is called the administrative / admin function (AF) and has exclusive
privileges to provision the RVU functional blocks' LFs to each PF and VF.
#. Exception path to Linux kernel from DPDK application as SW ``KNI`` replacement.
#. Communication between two different DPDK applications.
+SDP interface
+-------------
+
+System DPI Packet Interface unit (SDP) provides PCIe endpoint support for a
+remote host to DMA packets into and out of the OCTEON TX2 SoC. The SDP
+interface is available only when the OCTEON TX2 SoC is connected in PCIe
+endpoint mode. It can be used to send and receive packets to and from the
+remote host machine using the input/output queue pairs exposed to it. Packets
+received from the remote host arrive via NIX-RX, and packets destined for the
+remote host are sent via NIX-TX. The remote host machine needs to use a
+corresponding driver (kernel or user mode) to communicate with the SDP
+interface on the OCTEON TX2 SoC. SDP supports a single PCIe SR-IOV physical
+function (PF) and multiple virtual functions (VFs). Users can bind the PF or a
+VF to use the SDP interface, and it will be enumerated as an ethdev port (a
+configuration sketch is shown below).
+
+The primary use case for SDP is to enable the smart NIC model. Typical usage
+models are:
+
+#. Communication channel between the remote host and the OCTEON TX2 SoC over
+   PCIe.
+#. Transfer of packets received from a network interface to the remote host
+   over PCIe, and vice versa.
+
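+The following is a minimal sketch of bringing up an SDP ethdev port, assuming
+the SDP PF or VF is bound to a DPDK-compatible driver and enumerates as port 0;
+the helper name and descriptor counts below are illustrative only.
+
+.. code-block:: c
+
+   #include <rte_ethdev.h>
+   #include <rte_mempool.h>
+
+   /* Configure one Rx/Tx queue pair on an SDP ethdev port and start it. */
+   static int
+   sdp_port_setup(uint16_t port_id, struct rte_mempool *mp)
+   {
+           struct rte_eth_conf conf = {0};
+           int rc;
+
+           rc = rte_eth_dev_configure(port_id, 1, 1, &conf);
+           if (rc < 0)
+                   return rc;
+           rc = rte_eth_rx_queue_setup(port_id, 0, 512,
+                                       rte_eth_dev_socket_id(port_id),
+                                       NULL, mp);
+           if (rc < 0)
+                   return rc;
+           rc = rte_eth_tx_queue_setup(port_id, 0, 512,
+                                       rte_eth_dev_socket_id(port_id), NULL);
+           if (rc < 0)
+                   return rc;
+           /* Packets now flow to and from the remote host over PCIe. */
+           return rte_eth_dev_start(port_id);
+   }
+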
OCTEON TX2 packet flow
----------------------
#define PCI_DEVID_OCTEONTX2_RVU_CPT_VF 0xA0FE
#define PCI_DEVID_OCTEONTX2_RVU_AF_VF 0xA0f8
#define PCI_DEVID_OCTEONTX2_DPI_VF 0xA081
+#define PCI_DEVID_OCTEONTX2_RVU_SDP_PF 0xA0f6
+#define PCI_DEVID_OCTEONTX2_RVU_SDP_VF 0xA0f7
/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX_95XX 0xB200
case PCI_DEVID_OCTEONTX2_RVU_CPT_VF:
case PCI_DEVID_OCTEONTX2_RVU_AF_VF:
case PCI_DEVID_OCTEONTX2_RVU_VF:
+ case PCI_DEVID_OCTEONTX2_RVU_SDP_VF:
dev->hwcap |= OTX2_HWCAP_F_VF;
break;
}
#define otx2_dev_is_lbk(dev) ((dev->hwcap & OTX2_HWCAP_F_VF) && \
(dev->tx_chan_base < 0x700))
#define otx2_dev_revid(dev) (dev->hwcap & 0xFF)
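+/* sdp_link is set at probe time when the PCI device ID matches the SDP PF/VF */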
+#define otx2_dev_is_sdp(dev) (dev->sdp_link)
+
+#define otx2_dev_is_vf_or_sdp(dev) \
+ (otx2_dev_is_vf(dev) || otx2_dev_is_sdp(dev))
#define otx2_dev_is_A0(dev) \
((RVU_PCI_REV_MAJOR(otx2_dev_revid(dev)) == 0x0) && \
{
struct otx2_mbox *mbox = dev->mbox;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_start_rxtx(mbox);
{
struct otx2_mbox *mbox = dev->mbox;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_stop_rxtx(mbox);
{
struct otx2_mbox *mbox = dev->mbox;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_start_linkevents(mbox);
{
struct otx2_mbox *mbox = dev->mbox;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return 0;
if (en)
{
struct otx2_mbox *mbox = dev->mbox;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_stop_linkevents(mbox);
return otx2_mbox_process(mbox);
}
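+/* Identify SDP interfaces by their PF/VF PCI device IDs */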
+static bool
+otx2_eth_dev_is_sdp(struct rte_pci_device *pci_dev)
+{
+ if (pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_PF ||
+ pci_dev->id.device_id == PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
+ return true;
+ return false;
+}
+
static int
otx2_eth_dev_init(struct rte_eth_dev *eth_dev)
{
goto error;
}
}
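+ /* Record whether this port is an SDP interface */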
+ dev->sdp_link = otx2_eth_dev_is_sdp(pci_dev);
/* Device generic callbacks */
dev->ops = &otx2_dev_ops;
dev->eth_dev = eth_dev;
RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
PCI_DEVID_OCTEONTX2_RVU_AF_VF)
},
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_SDP_PF)
+ },
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_SDP_VF)
+ },
{
.vendor_id = 0,
},
uint64_t clk_delta;
bool mc_tbl_set;
struct otx2_nix_mc_filter_tbl mc_fltr_tbl;
+ bool sdp_link; /* true when the port is an SDP interface (PCIe EP mode) */
} __rte_cache_aligned;
struct otx2_eth_txq {
req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
req->update_smq = true;
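+ /* For SDP interfaces, direct the AF to apply the FRS on the SDP link */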
+ if (otx2_dev_is_sdp(dev))
+ req->sdp_link = true;
/* FRS HW config should exclude FCS but include NPC VTAG insert size */
req->maxlen = frame_size - RTE_ETHER_CRC_LEN + NIX_MAX_VTAG_ACT_SIZE;
/* Now just update Rx MAXLEN */
req = otx2_mbox_alloc_msg_nix_set_hw_frs(mbox);
req->maxlen = frame_size - RTE_ETHER_CRC_LEN;
+ if (otx2_dev_is_sdp(dev))
+ req->sdp_link = true;
rc = otx2_mbox_process(mbox);
if (rc)
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
struct otx2_mbox *mbox = dev->mbox;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return;
if (en)
struct nix_bp_cfg_rsp *rsp;
int rc;
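+ /* Backpressure configuration does not apply to SDP interfaces */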
+ if (otx2_dev_is_sdp(dev))
+ return 0;
+
if (enb) {
req = otx2_mbox_alloc_msg_nix_bp_enable(mbox);
req->chan_base = 0;
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
struct rte_eth_fc_conf fc_conf;
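+ /* Flow control is not configured for LBK and SDP ports */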
- if (otx2_dev_is_lbk(dev))
+ if (otx2_dev_is_lbk(dev) || otx2_dev_is_sdp(dev))
return 0;
memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
RTE_SET_USED(wait_to_complete);
- if (otx2_dev_is_lbk(dev))
+ if (otx2_dev_is_lbk(dev) || otx2_dev_is_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_get_linkinfo(mbox);
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
int rc, i;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return -ENOTSUP;
rc = nix_dev_set_link_state(eth_dev, 1);
struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
int i;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return -ENOTSUP;
/* Stop tx queues */
struct otx2_mbox *mbox = dev->mbox;
int rc;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return -ENOTSUP;
if (otx2_dev_active_vfs(dev))
struct otx2_mbox *mbox = dev->mbox;
int rc;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return 0;
otx2_mbox_alloc_msg_cgx_mac_max_entries_get(mbox);
struct cgx_mac_addr_add_rsp *rsp;
int rc;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return -ENOTSUP;
if (otx2_dev_active_vfs(dev))
struct cgx_mac_addr_del_req *req;
int rc;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return;
req = otx2_mbox_alloc_msg_cgx_mac_addr_del(mbox);
struct otx2_mbox *mbox = dev->mbox;
uint8_t rc = -EINVAL;
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return rc;
if (en) {
}
- /* If we are VF, no further action can be taken */
+ /* If we are VF or SDP, no further action can be taken */
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return -EINVAL;
if (!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)) {
}
- /* If we are VF, nothing else can be done */
+ /* If we are VF or SDP, nothing else can be done */
- if (otx2_dev_is_vf(dev))
+ if (otx2_dev_is_vf_or_sdp(dev))
return -EINVAL;
dev->rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP;
*regval++ = shaper2regval(&cir) | 1;
req->num_regs++;
}
+ /* Configure TL4 to send to SDP channel instead of CGX/LBK */
+ if (otx2_dev_is_sdp(dev)) {
+ *reg++ = NIX_AF_TL4X_SDP_LINK_CFG(schq);
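+ /* Bit 12 is the link enable bit; SDP uses relative channel 0 */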
+ *regval++ = BIT_ULL(12);
+ req->num_regs++;
+ }
rc = send_tm_reqval(mbox, req);
if (rc)
else
*regval++ = (strict_schedul_prio << 24) | rr_quantum;
req->num_regs++;
- *reg++ = NIX_AF_TL3_TL2X_LINKX_CFG(schq, nix_get_link(dev));
- *regval++ = BIT_ULL(12) | nix_get_relchan(dev);
- req->num_regs++;
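+ /* TL3/TL2 link config targets CGX/LBK links; it is not needed for SDP */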
+ if (!otx2_dev_is_sdp(dev)) {
+ *reg++ = NIX_AF_TL3_TL2X_LINKX_CFG(schq,
+ nix_get_link(dev));
+ *regval++ = BIT_ULL(12) | nix_get_relchan(dev);
+ req->num_regs++;
+ }
if (pir.rate && pir.burst) {
*reg++ = NIX_AF_TL2X_PIR(schq);
*regval++ = shaper2regval(&pir) | 1;
uint32_t lvl;
int rc = 0;
- if (nix_get_link(dev) == 13)
- return -EPERM;
-
for (lvl = 0; lvl < (uint32_t)dev->otx2_tm_root_lvl + 1; lvl++) {
TAILQ_FOREACH(tm_node, &dev->node_list, node) {
if (tm_node->hw_lvl_id == lvl) {