'otx_ep_ethdev.c',
'otx_ep_vf.c',
'otx2_ep_vf.c',
+ 'otx_ep_rxtx.c',
)
includes += include_directories('../../common/octeontx2')
#define OTX_EP_OQ_INFOPTR_MODE (0)
#define OTX_EP_OQ_REFIL_THRESHOLD (16)
+#define OTX_EP_PCI_RING_ALIGN 65536
+#define SDP_PKIND 40
+#define SDP_OTX2_PKIND 57
+#define OTX_EP_MAX_IOQS_PER_VF 8
#define otx_ep_info(fmt, args...) \
rte_log(RTE_LOG_INFO, otx_net_ep_logtype, \
uint32_t pending_list_size;
};
+/** Descriptor format.
+ * The descriptor ring is made of descriptors which have 2 64-bit values:
+ * -# Physical (bus) address of the data buffer.
+ * -# Physical (bus) address of an otx_ep_droq_info structure.
+ * The device DMA's incoming packets and its information at the address
+ * given by these descriptor fields.
+ */
+struct otx_ep_droq_desc {
+	/* DMA (bus) address of the receive buffer the device fills. */
+	uint64_t buffer_ptr;
+
+	/* DMA (bus) address of the otx_ep_droq_info for this buffer. */
+	uint64_t info_ptr;
+};
+#define OTX_EP_DROQ_DESC_SIZE (sizeof(struct otx_ep_droq_desc))
+
+/* Receive Header: 64-bit per-packet metadata word (see otx_ep_droq_info). */
+union otx_ep_rh {
+	uint64_t rh64;
+};
+#define OTX_EP_RH_SIZE (sizeof(union otx_ep_rh))
+
+/** Information about a packet DMA'ed by OCTEON TX2.
+ * This is the layout of the data found at the Info Pointer after the
+ * device has posted a packet. Not all descriptors have valid information:
+ * only the Info field of the first descriptor used for a packet carries
+ * information about that packet.
+ */
+struct otx_ep_droq_info {
+	/* The length of the packet. */
+	uint64_t length;
+
+	/* The Output Receive Header (see union otx_ep_rh). */
+	union otx_ep_rh rh;
+};
+#define OTX_EP_DROQ_INFO_SIZE (sizeof(struct otx_ep_droq_info))
+
+/* DROQ statistics. Per-output-queue receive counters. */
+struct otx_ep_droq_stats {
+	/* Number of packets received in this queue. */
+	uint64_t pkts_received;
+
+	/* Bytes received by this queue. */
+	uint64_t bytes_received;
+
+	/* Num of failures of rte_pktmbuf_alloc() */
+	uint64_t rx_alloc_failure;
+
+	/* Rx error */
+	uint64_t rx_err;
+
+	/* packets with data got ready after interrupt arrived */
+	uint64_t pkts_delayed_data;
+
+	/* packets dropped due to zero length */
+	uint64_t dropped_zlp;
+};
+
/* Structure to define the configuration attributes for each Output queue. */
struct otx_ep_oq_config {
/* Max number of OQs available */
uint32_t refill_threshold;
};
+/* The Descriptor Ring Output Queue (DROQ) structure. */
+struct otx_ep_droq {
+	/* Back-pointer to the owning EP VF device. */
+	struct otx_ep_device *otx_ep_dev;
+
+	/* The 8B aligned descriptor ring starts at this address. */
+	struct otx_ep_droq_desc *desc_ring;
+
+	/* Index of this queue on the VF. */
+	uint32_t q_no;
+
+	/* Packet count seen at the previous poll; reset with the ring.
+	 * NOTE(review): Rx fast path is not in view — confirm semantics.
+	 */
+	uint64_t last_pkt_count;
+
+	/* Mempool the receive buffers are allocated from. */
+	struct rte_mempool *mpool;
+
+	/* Driver should read the next packet at this index */
+	uint32_t read_idx;
+
+	/* OCTEON TX2 will write the next packet at this index */
+	uint32_t write_idx;
+
+	/* At this index, the driver will refill the descriptor's buffer */
+	uint32_t refill_idx;
+
+	/* Packets pending to be processed */
+	uint64_t pkts_pending;
+
+	/* Number of descriptors in this ring. */
+	uint32_t nb_desc;
+
+	/* The number of descriptors pending to refill. */
+	uint32_t refill_count;
+
+	/* Refill threshold: max(configured threshold, nb_desc / 2),
+	 * set in otx_ep_init_droq().
+	 */
+	uint32_t refill_threshold;
+
+	/* The 8B aligned info ptrs begin from this address. */
+	struct otx_ep_droq_info *info_list;
+
+	/* receive buffer list contains mbuf ptr list */
+	struct rte_mbuf **recv_buf_list;
+
+	/* The size of each buffer pointed by the buffer pointer. */
+	uint32_t buffer_size;
+
+	/* Statistics for this DROQ. */
+	struct otx_ep_droq_stats stats;
+
+	/* DMA mapped address of the DROQ descriptor ring. */
+	size_t desc_ring_dma;
+
+	/* Info_ptr list is allocated at this virtual address. */
+	size_t info_base_addr;
+
+	/* DMA mapped address of the info list */
+	size_t info_list_dma;
+
+	/* Allocated size of info list. */
+	uint32_t info_alloc_size;
+
+	/* Memory zone backing the descriptor ring. */
+	const struct rte_memzone *desc_ring_mz;
+	/* Memory zone backing the info list. */
+	const struct rte_memzone *info_mz;
+};
+#define OTX_EP_DROQ_SIZE (sizeof(struct otx_ep_droq))
+
+/* IQ/OQ enable masks: bit N corresponds to queue N
+ * (see io_qmask.oq |= (1ull << q_no) in the OQ init path).
+ */
+struct otx_ep_io_enable {
+	/* Input queue enable bits. */
+	uint64_t iq;
+	/* Output queue enable bits. */
+	uint64_t oq;
+	/* presumably 64B-instruction-mode bits for input queues — confirm */
+	uint64_t iq64B;
+};
+
/* Structure to define the configuration. */
struct otx_ep_config {
/* Input Queue attributes. */
/* Required functions for each VF device */
struct otx_ep_fn_list {
+	/* Set up the OQ registers for queue q_no on this VF. */
+	void (*setup_oq_regs)(struct otx_ep_device *otx_ep, uint32_t q_no);
+
	void (*setup_device_regs)(struct otx_ep_device *otx_ep);
+
+	/* Disable the VF's I/O queues; invoked right after device setup. */
+	void (*disable_io_queues)(struct otx_ep_device *otx_ep);
};
/* OTX_EP EP VF device data structure */
uint16_t chip_id;
+ uint32_t pkind;
+
struct rte_eth_dev *eth_dev;
int port_id;
uint32_t max_rx_queues;
+ /* Num OQs */
+ uint32_t nb_rx_queues;
+
+ /* The DROQ output queues */
+ struct otx_ep_droq *droq[OTX_EP_MAX_IOQS_PER_VF];
+
+ /* IOQ mask */
+ struct otx_ep_io_enable io_qmask;
+
/* SR-IOV info */
struct otx_ep_sriov_info sriov_info;
uint64_t tx_offloads;
};
+int otx_ep_setup_oqs(struct otx_ep_device *otx_ep, int oq_no, int num_descs,
+ int desc_size, struct rte_mempool *mpool,
+ unsigned int socket_id);
+int otx_ep_delete_oqs(struct otx_ep_device *otx_ep, uint32_t oq_no);
+
#define OTX_EP_MAX_PKT_SZ 64000U
#define OTX_EP_MAX_MAC_ADDRS 1
case PCI_DEVID_OCTEONTX_EP_VF:
otx_epvf->chip_id = dev_id;
ret = otx_ep_vf_setup_device(otx_epvf);
+ otx_epvf->fn_list.disable_io_queues(otx_epvf);
break;
case PCI_DEVID_OCTEONTX2_EP_NET_VF:
case PCI_DEVID_CN98XX_EP_NET_VF:
otx_epvf->chip_id = dev_id;
ret = otx2_ep_vf_setup_device(otx_epvf);
+ otx_epvf->fn_list.disable_io_queues(otx_epvf);
break;
default:
otx_ep_err("Unsupported device\n");
return 0;
}
+/**
+ * Set up our receive queue/ringbuffer. This is the
+ * queue the Octeon uses to send us packets and
+ * responses. We are given a memory pool for our
+ * packet buffers that are used to populate the receive
+ * queue.
+ *
+ * @param eth_dev
+ *    Pointer to the structure rte_eth_dev
+ * @param q_no
+ *    Queue number
+ * @param num_rx_descs
+ *    Number of entries in the queue
+ * @param socket_id
+ *    Where to allocate memory
+ * @param rx_conf
+ *    Pointer to the structure rte_eth_rxconf
+ * @param mp
+ *    Pointer to the packet pool
+ *
+ * @return
+ *    - On success, return 0
+ *    - On failure, return a negative errno value
+ */
+static int
+otx_ep_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
+		      uint16_t num_rx_descs, unsigned int socket_id,
+		      const struct rte_eth_rxconf *rx_conf __rte_unused,
+		      struct rte_mempool *mp)
+{
+	struct otx_ep_device *otx_epvf = OTX_EP_DEV(eth_dev);
+	struct rte_pktmbuf_pool_private *mbp_priv;
+	uint16_t buf_size;
+	int ret;
+
+	if (q_no >= otx_epvf->max_rx_queues) {
+		otx_ep_err("Invalid rx queue number %u\n", q_no);
+		return -EINVAL;
+	}
+
+	/* The ring size must be a power of two. */
+	if (num_rx_descs & (num_rx_descs - 1)) {
+		otx_ep_err("Invalid rx desc number should be pow 2 %u\n",
+			   num_rx_descs);
+		return -EINVAL;
+	}
+	/* Keep the ring comfortably larger than the PF watermark. */
+	if (num_rx_descs < (SDP_GBL_WMARK * 8)) {
+		otx_ep_err("Invalid rx desc number should at least be greater than 8xwmark %u\n",
+			   num_rx_descs);
+		return -EINVAL;
+	}
+
+	otx_ep_dbg("setting up rx queue %u\n", q_no);
+
+	/* Usable buffer size excludes the mbuf headroom. */
+	mbp_priv = rte_mempool_get_priv(mp);
+	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
+
+	/* Propagate the callee's errno instead of a bare -1 so all error
+	 * paths of this function return consistent negative errno values.
+	 */
+	ret = otx_ep_setup_oqs(otx_epvf, q_no, num_rx_descs, buf_size, mp,
+			       socket_id);
+	if (ret) {
+		otx_ep_err("droq allocation failed\n");
+		return ret;
+	}
+
+	eth_dev->data->rx_queues[q_no] = otx_epvf->droq[q_no];
+
+	return 0;
+}
+
+/**
+ * Release the receive queue/ringbuffer. Called by
+ * the upper layers.
+ *
+ * @param rxq
+ *    Opaque pointer to the receive queue to release
+ *
+ * @return
+ *    - nothing
+ */
+static void
+otx_ep_rx_queue_release(void *rxq)
+{
+	struct otx_ep_droq *rq = (struct otx_ep_droq *)rxq;
+	struct otx_ep_device *otx_epvf;
+	int q_id;
+
+	/* Ethdev may invoke release on a queue that was never set up;
+	 * guard against dereferencing a NULL queue pointer.
+	 */
+	if (rq == NULL)
+		return;
+
+	otx_epvf = rq->otx_ep_dev;
+	q_id = rq->q_no;
+
+	if (otx_ep_delete_oqs(otx_epvf, q_id))
+		otx_ep_err("Failed to delete OQ:%d\n", q_id);
+}
+
/* Define our ethernet definitions */
static const struct eth_dev_ops otx_ep_eth_dev_ops = {
	.dev_configure = otx_ep_dev_configure,
+	/* Per-queue Rx setup/teardown backed by the DROQ implementation. */
+	.rx_queue_setup = otx_ep_rx_queue_setup,
+	.rx_queue_release = otx_ep_rx_queue_release,
	.dev_infos_get = otx_ep_dev_info_get,
};
static int
-otx_ep_eth_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
+otx_epdev_exit(struct rte_eth_dev *eth_dev)
{
+	struct otx_ep_device *otx_epvf;
+	uint32_t num_queues, q;
+
+	otx_ep_info("%s:\n", __func__);
+
+	otx_epvf = OTX_EP_DEV(eth_dev);
+
+	/* otx_ep_delete_oqs() decrements nb_rx_queues, so iterate over a
+	 * snapshot of the count taken before teardown starts.
+	 */
+	num_queues = otx_epvf->nb_rx_queues;
+	for (q = 0; q < num_queues; q++) {
+		if (otx_ep_delete_oqs(otx_epvf, q)) {
+			otx_ep_err("Failed to delete OQ:%d\n", q);
+			return -EINVAL;
+		}
+	}
+	/* NOTE(review): nb_rx_queues has been decremented by the deletes
+	 * above, so this likely logs 0 rather than the freed count — verify.
+	 */
+	otx_ep_info("Num OQs:%d freed\n", otx_epvf->nb_rx_queues);
+
+	return 0;
+}
+
+static int
+otx_ep_eth_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+	/* Only the primary process owns and may free queue resources. */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+	otx_epdev_exit(eth_dev);
+
+	eth_dev->dev_ops = NULL;
+
	return 0;
}
eth_dev->data->mac_addrs = rte_zmalloc("otx_ep", RTE_ETHER_ADDR_LEN, 0);
if (eth_dev->data->mac_addrs == NULL) {
otx_ep_err("MAC addresses memory allocation failed\n");
+ eth_dev->dev_ops = NULL;
return -ENOMEM;
}
rte_eth_random_addr(vf_mac_addr.addr_bytes);
otx_epvf->pdev = pdev;
otx_epdev_init(otx_epvf);
+ if (pdev->id.device_id == PCI_DEVID_OCTEONTX2_EP_NET_VF)
+ otx_epvf->pkind = SDP_OTX2_PKIND;
+ else
+ otx_epvf->pkind = SDP_PKIND;
+ otx_ep_info("using pkind %d\n", otx_epvf->pkind);
return 0;
}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include <unistd.h>
+
+#include <rte_eal.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <ethdev_pci.h>
+
+#include "otx_ep_common.h"
+#include "otx_ep_vf.h"
+#include "otx2_ep_vf.h"
+#include "otx_ep_rxtx.h"
+
+/* Free a DMA memzone previously reserved via rte_eth_dma_zone_reserve().
+ * Logs (rather than crashes) when the memzone is NULL or unknown.
+ */
+static void
+otx_ep_dmazone_free(const struct rte_memzone *mz)
+{
+	const struct rte_memzone *mz_tmp;
+	int ret = 0;
+
+	if (mz == NULL) {
+		/* Bug fix: the original logged mz->name here, dereferencing
+		 * the NULL pointer it had just tested for.
+		 */
+		otx_ep_err("Memzone: NULL\n");
+		return;
+	}
+
+	mz_tmp = rte_memzone_lookup(mz->name);
+	if (mz_tmp == NULL) {
+		otx_ep_err("Memzone %s Not Found\n", mz->name);
+		return;
+	}
+
+	ret = rte_memzone_free(mz);
+	if (ret)
+		otx_ep_err("Memzone free failed : ret = %d\n", ret);
+}
+
+/* Return the DROQ bookkeeping to its freshly-initialized state:
+ * every ring cursor at slot zero, nothing pending, nothing to refill.
+ */
+static void
+otx_ep_droq_reset_indices(struct otx_ep_droq *droq)
+{
+	/* Ring cursors. */
+	droq->read_idx = 0;
+	droq->refill_idx = 0;
+	droq->write_idx = 0;
+
+	/* Outstanding-work counters. */
+	droq->refill_count = 0;
+	droq->pkts_pending = 0;
+	droq->last_pkt_count = 0;
+}
+
+/* Free every receive mbuf still posted to the ring and reset the ring
+ * bookkeeping. Safe to call on a partially initialized queue.
+ */
+static void
+otx_ep_droq_destroy_ring_buffers(struct otx_ep_droq *droq)
+{
+	uint32_t idx;
+
+	/* recv_buf_list is NULL when init failed before (or at) the
+	 * buffer-list allocation (see otx_ep_init_droq error path, which
+	 * reaches here via otx_ep_delete_oqs); indexing it would
+	 * dereference NULL.
+	 */
+	if (droq->recv_buf_list != NULL) {
+		for (idx = 0; idx < droq->nb_desc; idx++) {
+			if (droq->recv_buf_list[idx]) {
+				rte_pktmbuf_free(droq->recv_buf_list[idx]);
+				droq->recv_buf_list[idx] = NULL;
+			}
+		}
+	}
+
+	otx_ep_droq_reset_indices(droq);
+}
+
+/* Free OQs resources: posted mbufs, the receive-buffer list, the
+ * descriptor-ring memzone and the droq structure itself.
+ * Returns 0 on success, -EINVAL if the queue does not exist.
+ */
+int
+otx_ep_delete_oqs(struct otx_ep_device *otx_ep, uint32_t oq_no)
+{
+	struct otx_ep_droq *droq;
+
+	droq = otx_ep->droq[oq_no];
+	if (droq == NULL) {
+		otx_ep_err("Invalid droq[%d]\n", oq_no);
+		return -EINVAL;
+	}
+
+	/* Release all mbufs still sitting in the ring, then the list. */
+	otx_ep_droq_destroy_ring_buffers(droq);
+	rte_free(droq->recv_buf_list);
+	droq->recv_buf_list = NULL;
+
+	/* NOTE(review): info_mz is never freed here, nor allocated
+	 * anywhere in view — confirm it is unused or released elsewhere.
+	 */
+	if (droq->desc_ring_mz) {
+		otx_ep_dmazone_free(droq->desc_ring_mz);
+		droq->desc_ring_mz = NULL;
+	}
+
+	memset(droq, 0, OTX_EP_DROQ_SIZE);
+
+	rte_free(otx_ep->droq[oq_no]);
+	otx_ep->droq[oq_no] = NULL;
+
+	/* Decrements unconditionally; creators must have counted the
+	 * queue before calling this (see otx_ep_setup_oqs).
+	 */
+	otx_ep->nb_rx_queues--;
+
+	otx_ep_info("OQ[%d] is deleted\n", oq_no);
+	return 0;
+}
+
+/* Post one receive mbuf to every descriptor of the ring and reset the
+ * ring indices. Returns 0 on success, -ENOMEM on mbuf exhaustion; on
+ * failure the mbufs already posted stay in recv_buf_list and are freed
+ * by the caller's error path (otx_ep_delete_oqs).
+ */
+static int
+otx_ep_droq_setup_ring_buffers(struct otx_ep_droq *droq)
+{
+	struct otx_ep_droq_desc *desc_ring = droq->desc_ring;
+	struct otx_ep_droq_info *info;
+	struct rte_mbuf *buf;
+	uint32_t idx;
+
+	for (idx = 0; idx < droq->nb_desc; idx++) {
+		buf = rte_pktmbuf_alloc(droq->mpool);
+		if (buf == NULL) {
+			otx_ep_err("OQ buffer alloc failed\n");
+			droq->stats.rx_alloc_failure++;
+			return -ENOMEM;
+		}
+
+		droq->recv_buf_list[idx] = buf;
+		/* Clear the info header at the head of the buffer; the
+		 * device overwrites it when posting a packet.
+		 */
+		info = rte_pktmbuf_mtod(buf, struct otx_ep_droq_info *);
+		memset(info, 0, sizeof(*info));
+		/* NOTE(review): desc info_ptr is left zero — presumably
+		 * unused because OTX_EP_OQ_INFOPTR_MODE is 0; confirm.
+		 */
+		desc_ring[idx].buffer_ptr = rte_mbuf_data_iova_default(buf);
+	}
+
+	otx_ep_droq_reset_indices(droq);
+
+	return 0;
+}
+
+/* OQ initialization: populate the droq structure (allocated by the
+ * caller, otx_ep_setup_oqs), reserve the DMA-able descriptor ring,
+ * allocate and post the receive buffers, then program the hardware OQ
+ * registers and mark the queue active. Returns 0 on success, -ENOMEM
+ * on any allocation failure (caller unwinds via otx_ep_delete_oqs).
+ */
+static int
+otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
+		 uint32_t num_descs, uint32_t desc_size,
+		 struct rte_mempool *mpool, unsigned int socket_id)
+{
+	const struct otx_ep_config *conf = otx_ep->conf;
+	uint32_t c_refill_threshold;
+	struct otx_ep_droq *droq;
+	uint32_t desc_ring_size;
+
+	otx_ep_info("OQ[%d] Init start\n", q_no);
+
+	droq = otx_ep->droq[q_no];
+	droq->otx_ep_dev = otx_ep;
+	droq->q_no = q_no;
+	droq->mpool = mpool;
+
+	droq->nb_desc = num_descs;
+	droq->buffer_size = desc_size;
+	/* At least the configured threshold, but no less than half a ring. */
+	c_refill_threshold = RTE_MAX(conf->oq.refill_threshold,
+				     droq->nb_desc / 2);
+
+	/* OQ desc_ring set up */
+	desc_ring_size = droq->nb_desc * OTX_EP_DROQ_DESC_SIZE;
+	droq->desc_ring_mz = rte_eth_dma_zone_reserve(otx_ep->eth_dev, "droq",
+						      q_no, desc_ring_size,
+						      OTX_EP_PCI_RING_ALIGN,
+						      socket_id);
+
+	if (droq->desc_ring_mz == NULL) {
+		otx_ep_err("OQ:%d desc_ring allocation failed\n", q_no);
+		goto init_droq_fail;
+	}
+
+	droq->desc_ring_dma = droq->desc_ring_mz->iova;
+	droq->desc_ring = (struct otx_ep_droq_desc *)droq->desc_ring_mz->addr;
+
+	otx_ep_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
+		   q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
+	otx_ep_dbg("OQ[%d]: num_desc: %d\n", q_no, droq->nb_desc);
+
+	/* OQ buf_list set up */
+	droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
+						 (droq->nb_desc * sizeof(struct rte_mbuf *)),
+						 RTE_CACHE_LINE_SIZE, socket_id);
+	if (droq->recv_buf_list == NULL) {
+		otx_ep_err("OQ recv_buf_list alloc failed\n");
+		goto init_droq_fail;
+	}
+
+	if (otx_ep_droq_setup_ring_buffers(droq))
+		goto init_droq_fail;
+
+	droq->refill_threshold = c_refill_threshold;
+
+	/* Set up OQ registers */
+	otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
+
+	/* Mark this OQ active in the IOQ enable mask. */
+	otx_ep->io_qmask.oq |= (1ull << q_no);
+
+	return 0;
+
+init_droq_fail:
+	/* No local unwinding: the caller destroys the partially
+	 * initialized queue via otx_ep_delete_oqs().
+	 */
+	return -ENOMEM;
+}
+
+/* OQ configuration and setup: allocate the droq structure for queue
+ * oq_no and initialize it. Returns 0 on success, -ENOMEM on failure
+ * (all partial allocations are released before returning).
+ */
+int
+otx_ep_setup_oqs(struct otx_ep_device *otx_ep, int oq_no, int num_descs,
+		 int desc_size, struct rte_mempool *mpool,
+		 unsigned int socket_id)
+{
+	struct otx_ep_droq *droq;
+
+	/* Allocate new droq (no cast needed for rte_zmalloc in C). */
+	droq = rte_zmalloc("otx_ep_OQ", sizeof(*droq), RTE_CACHE_LINE_SIZE);
+	if (droq == NULL) {
+		otx_ep_err("Droq[%d] Creation Failed\n", oq_no);
+		return -ENOMEM;
+	}
+	otx_ep->droq[oq_no] = droq;
+
+	/* Count the queue before init: the failure path below goes through
+	 * otx_ep_delete_oqs(), which decrements nb_rx_queues
+	 * unconditionally. Incrementing only on success (as before) let a
+	 * failed init underflow the counter.
+	 */
+	otx_ep->nb_rx_queues++;
+
+	if (otx_ep_init_droq(otx_ep, oq_no, num_descs, desc_size, mpool,
+			     socket_id)) {
+		otx_ep_err("Droq[%d] Initialization failed\n", oq_no);
+		goto delete_OQ;
+	}
+	otx_ep_info("OQ[%d] is created.\n", oq_no);
+
+	return 0;
+
+delete_OQ:
+	otx_ep_delete_oqs(otx_ep, oq_no);
+	return -ENOMEM;
+}
#define PCI_DEVID_OCTEONTX_EP_VF 0xa303
+/* This watermark is a static value programmed by the SLI PF driver on
+ * the Octeon; no handshake mechanism exists to negotiate it, so update
+ * this constant whenever the SLI PF driver's value changes.
+ */
+#define SDP_GBL_WMARK 0x100
+
int
otx_ep_vf_setup_device(struct otx_ep_device *otx_ep);
#endif /*_OTX_EP_VF_H_ */