#include <ethdev_pci.h>
-#include "otx2_common.h"
#include "otx_ep_common.h"
#include "otx_ep_vf.h"
#include "otx2_ep_vf.h"
otx_epvf = OTX_EP_DEV(eth_dev);
- devinfo->speed_capa = ETH_LINK_SPEED_10G;
+ devinfo->speed_capa = RTE_ETH_LINK_SPEED_10G;
devinfo->max_rx_queues = otx_epvf->max_rx_queues;
devinfo->max_tx_queues = otx_epvf->max_tx_queues;
devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
- devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
- devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
- devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
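+ /* The jumbo-frame Rx offload flag no longer exists; the Rx frame size
+  * is instead bounded by max_rx_pktlen / the configured MTU.
+  */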
+ devinfo->rx_offload_capa = RTE_ETH_RX_OFFLOAD_SCATTER;
+ devinfo->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
return 0;
}
+static int
+otx_ep_dev_start(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_epvf;
+ unsigned int q;
+ int ret;
+
+ otx_epvf = OTX_EP_DEV(eth_dev);
+ /* Enable IQ/OQ for this device */
+ ret = otx_epvf->fn_list.enable_io_queues(otx_epvf);
+ if (ret) {
+ otx_ep_err("IOQ enable failed\n");
+ return ret;
+ }
+
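+ /* Post the full descriptor count to each OQ's credit doorbell so the
+  * hardware can start using the Rx buffers queued at setup time.
+  */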
+ for (q = 0; q < otx_epvf->nb_rx_queues; q++) {
+ rte_write32(otx_epvf->droq[q]->nb_desc,
+ otx_epvf->droq[q]->pkts_credit_reg);
+
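+ /* Flush the doorbell write before reading the register back */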
+ rte_wmb();
+ otx_ep_info("OQ[%d] dbells [%d]\n", q,
+ rte_read32(otx_epvf->droq[q]->pkts_credit_reg));
+ }
+
+ otx_ep_info("dev started\n");
+
+ return 0;
+}
+
+/* Stop device and disable input/output functions */
+static int
+otx_ep_dev_stop(struct rte_eth_dev *eth_dev)
+{
+ struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
+
+ otx_epvf->fn_list.disable_io_queues(otx_epvf);
+
+ return 0;
+}
+
static int
otx_ep_chip_specific_setup(struct otx_ep_device *otx_epvf)
{
ret = otx_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.disable_io_queues(otx_epvf);
break;
- case PCI_DEVID_OCTEONTX2_EP_NET_VF:
+ case PCI_DEVID_CN9K_EP_NET_VF:
case PCI_DEVID_CN98XX_EP_NET_VF:
otx_epvf->chip_id = dev_id;
ret = otx2_ep_vf_setup_device(otx_epvf);
otx_epvf->fn_list.setup_device_regs(otx_epvf);
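+ /* The Rx burst handler is common to all supported chips; the Tx burst
+  * handler is selected per chip ID below.
+  */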
+ otx_epvf->eth_dev->rx_pkt_burst = &otx_ep_recv_pkts;
+ if (otx_epvf->chip_id == PCI_DEVID_OCTEONTX_EP_VF)
+ otx_epvf->eth_dev->tx_pkt_burst = &otx_ep_xmit_pkts;
+ else if (otx_epvf->chip_id == PCI_DEVID_CN9K_EP_NET_VF ||
+ otx_epvf->chip_id == PCI_DEVID_CN98XX_EP_NET_VF)
+ otx_epvf->eth_dev->tx_pkt_burst = &otx2_ep_xmit_pkts;
ethdev_queues = (uint32_t)(otx_epvf->sriov_info.rings_per_vf);
otx_epvf->max_rx_queues = ethdev_queues;
otx_epvf->max_tx_queues = ethdev_queues;
* Release the receive queue/ringbuffer. Called by
* the upper layers.
*
- * @param rxq
- * Opaque pointer to the receive queue to release
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param q_no
+ * Receive queue index.
*
* @return
* - nothing
*/
static void
-otx_ep_rx_queue_release(void *rxq)
+otx_ep_rx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
- struct otx_ep_droq *rq = (struct otx_ep_droq *)rxq;
+ struct otx_ep_droq *rq = dev->data->rx_queues[q_no];
struct otx_ep_device *otx_epvf = rq->otx_ep_dev;
int q_id = rq->q_no;
* Release the transmit queue/ringbuffer. Called by
* the upper layers.
*
- * @param txq
- * Opaque pointer to the transmit queue to release
+ * @param dev
+ * Pointer to Ethernet device structure.
+ * @param q_no
+ * Transmit queue index.
*
* @return
* - nothing
*/
static void
-otx_ep_tx_queue_release(void *txq)
+otx_ep_tx_queue_release(struct rte_eth_dev *dev, uint16_t q_no)
{
- struct otx_ep_instr_queue *tq = (struct otx_ep_instr_queue *)txq;
+ struct otx_ep_instr_queue *tq = dev->data->tx_queues[q_no];
otx_ep_delete_iqs(tq->otx_ep_dev, tq->q_no);
}
/* Define our ethernet device operations */
static const struct eth_dev_ops otx_ep_eth_dev_ops = {
.dev_configure = otx_ep_dev_configure,
+ .dev_start = otx_ep_dev_start,
+ .dev_stop = otx_ep_dev_stop,
.rx_queue_setup = otx_ep_rx_queue_setup,
.rx_queue_release = otx_ep_rx_queue_release,
.tx_queue_setup = otx_ep_tx_queue_setup,
otx_epvf = OTX_EP_DEV(eth_dev);
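+ /* Quiesce the hardware queues before the rings are freed */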
+ otx_epvf->fn_list.disable_io_queues(otx_epvf);
+
num_queues = otx_epvf->nb_rx_queues;
for (q = 0; q < num_queues; q++) {
if (otx_ep_delete_oqs(otx_epvf, q)) {
otx_epdev_exit(eth_dev);
eth_dev->dev_ops = NULL;
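+ /* Drop the burst handlers along with the ops table on uninit */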
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
return 0;
}
otx_epvf->pdev = pdev;
otx_epdev_init(otx_epvf);
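+ /* CN9K SDP uses a different packet-kind value; the other EP VFs keep
+  * the default.
+  */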
- if (pdev->id.device_id == PCI_DEVID_OCTEONTX2_EP_NET_VF)
+ if (pdev->id.device_id == PCI_DEVID_CN9K_EP_NET_VF)
otx_epvf->pkind = SDP_OTX2_PKIND;
else
otx_epvf->pkind = SDP_PKIND;
/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_otx_ep_map[] = {
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX_EP_VF) },
- { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_EP_NET_VF) },
+ { RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN9K_EP_NET_VF) },
{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN98XX_EP_NET_VF) },
{ .vendor_id = 0, /* sentinel */ }
};
RTE_PMD_REGISTER_PCI(net_otx_ep, rte_otx_ep_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_otx_ep, pci_id_otx_ep_map);
RTE_PMD_REGISTER_KMOD_DEP(net_otx_ep, "* igb_uio | vfio-pci");
-RTE_LOG_REGISTER(otx_net_ep_logtype, pmd.net.octeontx_ep, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(otx_net_ep_logtype, NOTICE);