* Copyright 2019 NXP
*/
+#include <sys/epoll.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
int8_t gem_id;
};
static struct pfe *g_pfe;
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
/* TODO: make pfe_svr a runtime option.
* Driver should be able to get the SVR
fclose(svr_file);
}
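+/* Enable the GPI and GEMAC blocks so traffic can flow on this port */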
+static int pfe_eth_start(struct pfe_eth_priv_s *priv)
+{
+ gpi_enable(priv->GPI_baseaddr);
+ gemac_enable(priv->EMAC_baseaddr);
+
+ return 0;
+}
+
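+/* Reclaim mbufs for Tx descriptors the HIF has completed on queue tx_q_num */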
+static void
+pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num,
+		  __rte_unused int from_tx, __rte_unused int n_desc)
+{
+ struct rte_mbuf *mbuf;
+ unsigned int flags;
+
+ /* Clean HIF and client queue */
+ while ((mbuf = hif_lib_tx_get_next_complete(&priv->client,
+ tx_q_num, &flags,
+ HIF_TX_DESC_NT))) {
+		mbuf->next = NULL;
+		mbuf->nb_segs = 1;
+		rte_pktmbuf_free(mbuf);
+ }
+}
+
+static void
+pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
+{
+ unsigned int ii;
+
+ for (ii = 0; ii < emac_txq_cnt; ii++)
+ pfe_eth_flush_txQ(priv, ii, 0, 0);
+}
+
+static int
+pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
+{
+ struct pfe_eth_priv_s *priv = data;
+
+ switch (event) {
+ case EVENT_TXDONE_IND:
+ pfe_eth_flush_tx(priv);
+ hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
+ break;
+ case EVENT_HIGH_RX_WM:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
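+/* Interrupt-mode Rx burst: poll the HIF once and, if nothing was received,
+ * acknowledge and re-arm the Rx interrupt, then sleep in epoll_wait() for
+ * up to 1 msec.
+ */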
+static uint16_t
+pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_rx_queue *queue = rxq;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct epoll_event epoll_ev;
+	int ticks = 1; /* 1 msec; epoll_wait() takes an int timeout */
+ int ret;
+ int have_something, work_done;
+
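+/* HIF interrupt status bits acknowledged before re-arming Rx interrupts */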
+#define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)
+
+	/* TODO: can we remove this cleanup from here? */
+ pfe_tx_do_cleanup(priv->pfe);
+ have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
+ work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
+ rx_pkts, nb_pkts);
+
+ if (!have_something || !work_done) {
+ writel(RESET_STATUS, HIF_INT_SRC);
+ writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
+ ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
+ if (ret < 0 && errno != EINTR)
+ PFE_PMD_ERR("epoll_wait fails with %d\n", errno);
+ }
+
+ return work_done;
+}
+
+static uint16_t
+pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_rx_queue *queue = rxq;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct rte_mempool *pool;
+
+	/* TODO: can we remove this cleanup from here? */
+ pfe_tx_do_cleanup(priv->pfe);
+ pfe_hif_rx_process(priv->pfe, nb_pkts);
+ pool = priv->pfe->hif.shm->pool;
+
+ return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
+}
+
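+/* Tx burst: a multi-segment mbuf is handed to the HIF one segment at a
+ * time; the first segment is flagged HIF_FIRST_BUFFER, the last
+ * HIF_LAST_BUFFER | HIF_DATA_VALID, and middle segments carry no flags.
+ */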
+static uint16_t
+pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_tx_queue *queue = tx_queue;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct rte_eth_stats *stats = &priv->stats;
+ int i;
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (tx_pkts[i]->nb_segs > 1) {
+ struct rte_mbuf *mbuf;
+ int j;
+
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+ tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+ tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
+ tx_pkts[i]);
+
+ mbuf = tx_pkts[i]->next;
+ for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(mbuf),
+ mbuf->buf_addr + mbuf->data_off,
+ mbuf->data_len,
+ 0x0, 0x0, mbuf);
+ mbuf = mbuf->next;
+ }
+
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(mbuf),
+ mbuf->buf_addr + mbuf->data_off,
+ mbuf->data_len,
+ 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
+ mbuf);
+ } else {
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+ tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+ tx_pkts[i]->pkt_len, 0 /*ctrl*/,
+ HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
+ HIF_DATA_VALID,
+ tx_pkts[i]);
+ }
+ stats->obytes += tx_pkts[i]->pkt_len;
+ hif_tx_dma_start();
+ }
+ stats->opackets += nb_pkts;
+ pfe_tx_do_cleanup(priv->pfe);
+
+ return nb_pkts;
+}
+
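+/* No-op burst handlers installed while the port is stopped */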
+static uint16_t
+pfe_dummy_xmit_pkts(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+pfe_dummy_recv_pkts(__rte_unused void *rxq,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
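+/* dev_start callback: register this port as a HIF client (or drain and
+ * reuse a registration left over from a previous run) and enable the MAC.
+ */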
+static int
+pfe_eth_open(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ struct hif_client_s *client;
+ struct hif_shm *hif_shm;
+ int rc;
+
+ /* Register client driver with HIF */
+ client = &priv->client;
+
+ if (client->pfe) {
+ hif_shm = client->pfe->hif.shm;
+		/* TODO: remove this registration block once proper
+		 * cleanup is added in eth_close
+		 */
+ if (!test_bit(PFE_CL_GEM0 + priv->id,
+ &hif_shm->g_client_status[0])) {
+ /* Register client driver with HIF */
+ memset(client, 0, sizeof(*client));
+ client->id = PFE_CL_GEM0 + priv->id;
+ client->tx_qn = emac_txq_cnt;
+ client->rx_qn = EMAC_RXQ_CNT;
+ client->priv = priv;
+ client->pfe = priv->pfe;
+ client->port_id = dev->data->port_id;
+ client->event_handler = pfe_eth_event_handler;
+
+ client->tx_qsize = EMAC_TXQ_DEPTH;
+ client->rx_qsize = EMAC_RXQ_DEPTH;
+
+ rc = hif_lib_client_register(client);
+ if (rc) {
+ PFE_PMD_ERR("hif_lib_client_register(%d)"
+ " failed", client->id);
+ goto err0;
+ }
+ } else {
+			/* Drain any packets left over from a previous run */
+ int ret = 0;
+ struct rte_mbuf *rx_pkts[32];
+ /* TODO multiqueue support */
+ ret = hif_lib_receive_pkt(&client->rx_q[0],
+ hif_shm->pool, rx_pkts, 32);
+ while (ret) {
+ int i;
+ for (i = 0; i < ret; i++)
+ rte_pktmbuf_free(rx_pkts[i]);
+ ret = hif_lib_receive_pkt(&client->rx_q[0],
+ hif_shm->pool,
+ rx_pkts, 32);
+ }
+ }
+ } else {
+ /* Register client driver with HIF */
+ memset(client, 0, sizeof(*client));
+ client->id = PFE_CL_GEM0 + priv->id;
+ client->tx_qn = emac_txq_cnt;
+ client->rx_qn = EMAC_RXQ_CNT;
+ client->priv = priv;
+ client->pfe = priv->pfe;
+ client->port_id = dev->data->port_id;
+ client->event_handler = pfe_eth_event_handler;
+
+ client->tx_qsize = EMAC_TXQ_DEPTH;
+ client->rx_qsize = EMAC_RXQ_DEPTH;
+
+ rc = hif_lib_client_register(client);
+ if (rc) {
+ PFE_PMD_ERR("hif_lib_client_register(%d) failed",
+ client->id);
+ goto err0;
+ }
+ }
+ rc = pfe_eth_start(priv);
+ dev->rx_pkt_burst = &pfe_recv_pkts;
+ dev->tx_pkt_burst = &pfe_xmit_pkts;
+	/* Switch to the interrupt-driven Rx path when requested via env */
+ if (getenv("PFE_INTR_SUPPORT")) {
+ dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
+ PFE_PMD_INFO("PFE INTERRUPT Mode enabled");
+ }
+
+err0:
+ return rc;
+}
+
static int
pfe_eth_open_cdev(struct pfe_eth_priv_s *priv)
{
}
}
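+/* dev_stop callback: disable the MAC and GPI and park the burst handlers */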
+static void
+pfe_eth_stop(struct rte_eth_dev *dev)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ gemac_disable(priv->EMAC_baseaddr);
+ gpi_disable(priv->GPI_baseaddr);
+
+ dev->rx_pkt_burst = &pfe_dummy_recv_pkts;
+ dev->tx_pkt_burst = &pfe_dummy_xmit_pkts;
+}
+
static void
pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe)
{
PMD_INIT_FUNC_TRACE();
+ pfe_eth_stop(dev);
/* Close the device file for link status */
pfe_eth_close_cdev(dev->data->dev_private);
pfe->nb_devs--;
}
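+/* dev_close callback: detach this port and release the shared HIF
+ * resources once the last port is gone.
+ */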
+static void
+pfe_eth_close(struct rte_eth_dev *dev)
+{
+ if (!dev)
+ return;
+
+ if (!g_pfe)
+ return;
+
+ pfe_eth_exit(dev, g_pfe);
+
+ if (g_pfe->nb_devs == 0) {
+ pfe_hif_exit(g_pfe);
+ pfe_hif_lib_exit(g_pfe);
+ rte_free(g_pfe);
+ g_pfe = NULL;
+ }
+}
+
+static int
+pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
+{
+ return 0;
+}
+
+static int
+pfe_eth_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
+{
+ struct pfe_eth_priv_s *internals = dev->data->dev_private;
+
+ dev_info->if_index = internals->id;
+ dev_info->max_mac_addrs = PFE_MAX_MACS;
+ dev_info->max_rx_queues = dev->data->nb_rx_queues;
+ dev_info->max_tx_queues = dev->data->nb_tx_queues;
+ dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
+ dev_info->rx_offload_capa = dev_rx_offloads_sup;
+ dev_info->tx_offload_capa = dev_tx_offloads_sup;
+ if (pfe_svr == SVR_LS1012A_REV1)
+ dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
+ else
+ dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
+
+ return 0;
+}
+
+/* Only the mb_pool supplied on the first call of this API is used for the
+ * whole system; nb_rx_desc and rx_conf are unused parameters.
+ */
+static int
+pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ __rte_unused uint16_t nb_rx_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool)
+{
+ int rc = 0;
+ struct pfe *pfe;
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ pfe = priv->pfe;
+
+ if (queue_idx >= EMAC_RXQ_CNT) {
+ PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+ queue_idx, EMAC_RXQ_CNT);
+ return -1;
+ }
+
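+	/* The first queue setup initializes the shared HIF rings and enables
+	 * HIF Rx/Tx; subsequent queues only attach to their slot.
+	 */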
+ if (!pfe->hif.setuped) {
+ rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
+ if (rc) {
+ PFE_PMD_ERR("Could not allocate buffer descriptors");
+ return -1;
+ }
+
+ pfe->hif.shm->pool = mb_pool;
+ if (pfe_hif_init_buffers(&pfe->hif)) {
+ PFE_PMD_ERR("Could not initialize buffer descriptors");
+ return -1;
+ }
+ hif_init();
+ hif_rx_enable();
+ hif_tx_enable();
+ pfe->hif.setuped = 1;
+ }
+ dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
+ priv->client.rx_q[queue_idx].queue_id = queue_idx;
+
+ return 0;
+}
+
+static void
+pfe_rx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static void
+pfe_tx_queue_release(void *q __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+pfe_tx_queue_setup(struct rte_eth_dev *dev,
+ uint16_t queue_idx,
+ __rte_unused uint16_t nb_desc,
+ __rte_unused unsigned int socket_id,
+ __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+ if (queue_idx >= emac_txq_cnt) {
+ PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+ queue_idx, emac_txq_cnt);
+ return -1;
+ }
+ dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
+ priv->client.tx_q[queue_idx].queue_id = queue_idx;
+ return 0;
+}
+
+static const uint32_t *
+pfe_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+ static const uint32_t ptypes[] = {
+		/* TODO: add more types */
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L3_IPV4,
+ RTE_PTYPE_L3_IPV4_EXT,
+ RTE_PTYPE_L3_IPV6,
+ RTE_PTYPE_L3_IPV6_EXT,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_SCTP
+ };
+
+ if (dev->rx_pkt_burst == pfe_recv_pkts ||
+ dev->rx_pkt_burst == pfe_recv_pkts_on_intr)
+ return ptypes;
+ return NULL;
+}
+
+static int
+pfe_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct pfe_eth_priv_s *priv = dev->data->dev_private;
+ struct rte_eth_stats *eth_stats = &priv->stats;
+
+ if (stats == NULL)
+ return -1;
+
+ memset(stats, 0, sizeof(struct rte_eth_stats));
+
+ stats->ipackets = eth_stats->ipackets;
+ stats->ibytes = eth_stats->ibytes;
+ stats->opackets = eth_stats->opackets;
+ stats->obytes = eth_stats->obytes;
+
+ return 0;
+}
+
+static const struct eth_dev_ops ops = {
+ .dev_start = pfe_eth_open,
+ .dev_stop = pfe_eth_stop,
+ .dev_close = pfe_eth_close,
+ .dev_configure = pfe_eth_configure,
+ .dev_infos_get = pfe_eth_info,
+ .rx_queue_setup = pfe_rx_queue_setup,
+ .rx_queue_release = pfe_rx_queue_release,
+ .tx_queue_setup = pfe_tx_queue_setup,
+ .tx_queue_release = pfe_tx_queue_release,
+ .dev_supported_ptypes_get = pfe_supported_ptypes_get,
+ .stats_get = pfe_stats_get,
+};
+
static int
pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
{
}
eth_dev->data->mtu = 1500;
+ eth_dev->dev_ops = &ops;
+ pfe_eth_stop(eth_dev);
pfe_gemac_init(priv);
eth_dev->data->nb_rx_queues = 1;