1 /* SPDX-License-Identifier: BSD-3-Clause
6 #include <rte_kvargs.h>
7 #include <rte_ethdev_vdev.h>
8 #include <rte_bus_vdev.h>
15 #define PFE_MAX_MACS 1 /*we can support upto 4 MACs per IF*/
16 #define PFE_VDEV_GEM_ID_ARG "intf"
18 struct pfe_vdev_init_params {
/* Singleton PFE hardware state shared by every vdev port created by this PMD. */
21 static struct pfe *g_pfe;
22 /* Supported Rx offloads */
23 static uint64_t dev_rx_offloads_sup =
24 DEV_RX_OFFLOAD_IPV4_CKSUM |
25 DEV_RX_OFFLOAD_UDP_CKSUM |
26 DEV_RX_OFFLOAD_TCP_CKSUM;
28 /* Supported Tx offloads */
29 static uint64_t dev_tx_offloads_sup =
30 DEV_TX_OFFLOAD_IPV4_CKSUM |
31 DEV_TX_OFFLOAD_UDP_CKSUM |
32 DEV_TX_OFFLOAD_TCP_CKSUM;
34 /* TODO: make pfe_svr a runtime option.
35 * Driver should be able to get the SVR
36 * information from HW.
/* SoC revision; defaults to LS1012A rev1 and may be updated at probe
 * time by pfe_soc_version_get() (update path elided in this view).
 */
38 unsigned int pfe_svr = SVR_LS1012A_REV1;
/* Per-GEMAC register base addresses, filled in during pmd_pfe_probe(). */
39 static void *cbus_emac_base[3];
40 static void *cbus_gpi_base[3];
/* One-time GEMAC configuration for a port: fixed 1G/full-duplex link
 * config, broadcast and 1536-byte RX, stacked VLAN, RX pause frames,
 * 64-bit bus width and RX checksum offload.
 * NOTE(review): return type and trailing lines are elided in this view.
 */
47 pfe_gemac_init(struct pfe_eth_priv_s *priv)
51 cfg.speed = SPEED_1000M;
52 cfg.duplex = DUPLEX_FULL;
54 gemac_set_config(priv->EMAC_baseaddr, &cfg);
55 gemac_allow_broadcast(priv->EMAC_baseaddr);
56 gemac_enable_1536_rx(priv->EMAC_baseaddr);
57 gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
58 gemac_enable_pause_rx(priv->EMAC_baseaddr);
59 gemac_set_bus_width(priv->EMAC_baseaddr, 64);
60 gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
/* Read the SoC version register (SVR) from the PFE_SOC_ID_FILE sysfs
 * entry; on failure the driver keeps the compiled-in default.
 * NOTE(review): the line that stores svr_ver into pfe_svr and the
 * fclose() are elided in this view — confirm against the full source.
 */
66 pfe_soc_version_get(void)
68 FILE *svr_file = NULL;
69 unsigned int svr_ver = 0;
71 PMD_INIT_FUNC_TRACE();
73 svr_file = fopen(PFE_SOC_ID_FILE, "r");
75 PFE_PMD_ERR("Unable to open SoC device");
76 return; /* Not supported on this infra */
79 if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
82 PFE_PMD_ERR("Unable to read SoC device");
/* Enable datapath for one port: bring up the GPI block then the GEMAC. */
87 static int pfe_eth_start(struct pfe_eth_priv_s *priv)
89 gpi_enable(priv->GPI_baseaddr);
90 gemac_enable(priv->EMAC_baseaddr);
/* Drain completed TX descriptors for one queue, freeing each mbuf the
 * HIF library reports as transmitted. from_tx/n_desc are unused here.
 */
96 pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
97 __rte_unused from_tx, __rte_unused int n_desc)
99 struct rte_mbuf *mbuf;
102 /* Clean HIF and client queue */
103 while ((mbuf = hif_lib_tx_get_next_complete(&priv->client,
109 rte_pktmbuf_free(mbuf);
/* Flush every TX queue of the port via pfe_eth_flush_txQ(). */
116 pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
120 for (ii = 0; ii < emac_txq_cnt; ii++)
121 pfe_eth_flush_txQ(priv, ii, 0, 0);
/* HIF client event callback. On TX-done, reclaim finished TX mbufs and
 * re-arm the event; EVENT_HIGH_RX_WM handling is elided in this view.
 */
125 pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
127 struct pfe_eth_priv_s *priv = data;
130 case EVENT_TXDONE_IND:
131 pfe_eth_flush_tx(priv);
132 hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
134 case EVENT_HIGH_RX_WM:
/* Interrupt-assisted RX burst: poll the HIF once, and if nothing was
 * received (or no packets were delivered to the client queue) re-enable
 * the RX interrupt and sleep in epoll_wait() for up to 1 ms instead of
 * spinning. Selected when the PFE_INTR_SUPPORT env var is set.
 */
143 pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
145 struct hif_client_rx_queue *queue = rxq;
146 struct pfe_eth_priv_s *priv = queue->priv;
147 struct epoll_event epoll_ev;
148 uint64_t ticks = 1; /* 1 msec */
150 int have_something, work_done;
152 #define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)
154 /*TODO can we remove this cleanup from here?*/
155 pfe_tx_do_cleanup(priv->pfe);
156 have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
157 work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
160 if (!have_something || !work_done) {
/* Ack pending HIF interrupt sources, then unmask RX-packet interrupt
 * before blocking, so a packet arriving now wakes the epoll below.
 */
161 writel(RESET_STATUS, HIF_INT_SRC);
162 writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
163 ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
164 if (ret < 0 && errno != EINTR)
165 PFE_PMD_ERR("epoll_wait fails with %d\n", errno);
/* Default polling RX burst: reclaim TX completions, push fresh buffers
 * through the HIF RX ring, then hand up to nb_pkts mbufs to the caller.
 */
172 pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
174 struct hif_client_rx_queue *queue = rxq;
175 struct pfe_eth_priv_s *priv = queue->priv;
176 struct rte_mempool *pool;
178 /*TODO can we remove this cleanup from here?*/
179 pfe_tx_do_cleanup(priv->pfe);
180 pfe_hif_rx_process(priv->pfe, nb_pkts);
181 pool = priv->pfe->hif.shm->pool;
183 return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
/* TX burst. Multi-segment mbufs are submitted to the HIF one segment at
 * a time: first segment flagged HIF_FIRST_BUFFER, middle segments plain,
 * last segment HIF_LAST_BUFFER | HIF_DATA_VALID. Single-segment mbufs go
 * out in one call carrying both FIRST and LAST flags. TX byte/packet
 * counters are accumulated in the per-port stats.
 * NOTE(review): several continuation lines (lengths/flags of middle and
 * last segments, loop closings, return) are elided in this view.
 */
187 pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
189 struct hif_client_tx_queue *queue = tx_queue;
190 struct pfe_eth_priv_s *priv = queue->priv;
191 struct rte_eth_stats *stats = &priv->stats;
194 for (i = 0; i < nb_pkts; i++) {
195 if (tx_pkts[i]->nb_segs > 1) {
196 struct rte_mbuf *mbuf;
/* First segment of a chained packet. */
199 hif_lib_xmit_pkt(&priv->client, queue->queue_id,
200 (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
201 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
202 tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
/* Middle segments: nb_segs - 2 of them. */
205 mbuf = tx_pkts[i]->next;
206 for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
207 hif_lib_xmit_pkt(&priv->client, queue->queue_id,
208 (void *)(size_t)rte_pktmbuf_iova(mbuf),
209 mbuf->buf_addr + mbuf->data_off,
/* Final segment closes the HIF buffer chain. */
215 hif_lib_xmit_pkt(&priv->client, queue->queue_id,
216 (void *)(size_t)rte_pktmbuf_iova(mbuf),
217 mbuf->buf_addr + mbuf->data_off,
219 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
/* Common single-segment fast path. */
222 hif_lib_xmit_pkt(&priv->client, queue->queue_id,
223 (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
224 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
225 tx_pkts[i]->pkt_len, 0 /*ctrl*/,
226 HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
230 stats->obytes += tx_pkts[i]->pkt_len;
233 stats->opackets += nb_pkts;
234 pfe_tx_do_cleanup(priv->pfe);
/* No-op TX burst installed while the port is stopped (see pfe_eth_stop). */
240 pfe_dummy_xmit_pkts(__rte_unused void *tx_queue,
241 __rte_unused struct rte_mbuf **tx_pkts,
242 __rte_unused uint16_t nb_pkts)
/* No-op RX burst installed while the port is stopped (see pfe_eth_stop). */
248 pfe_dummy_recv_pkts(__rte_unused void *rxq,
249 __rte_unused struct rte_mbuf **rx_pkts,
250 __rte_unused uint16_t nb_pkts)
/* dev_start callback. Registers this port as a HIF client (or, if a
 * stale registration survives from a previous run — see the TODO about
 * missing cleanup in eth_close — drains any leftover RX mbufs first and
 * re-registers), starts the MAC/GPI, and installs the real burst
 * functions. PFE_INTR_SUPPORT in the environment selects the
 * interrupt-assisted RX path.
 * NOTE(review): error-branch bodies and the function's return are
 * elided in this view.
 */
256 pfe_eth_open(struct rte_eth_dev *dev)
258 struct pfe_eth_priv_s *priv = dev->data->dev_private;
259 struct hif_client_s *client;
260 struct hif_shm *hif_shm;
263 /* Register client driver with HIF */
264 client = &priv->client;
267 hif_shm = client->pfe->hif.shm;
268 /* TODO please remove the below code of if block, once we add
269 * the proper cleanup in eth_close
271 if (!test_bit(PFE_CL_GEM0 + priv->id,
272 &hif_shm->g_client_status[0])) {
273 /* Register client driver with HIF */
274 memset(client, 0, sizeof(*client));
275 client->id = PFE_CL_GEM0 + priv->id;
276 client->tx_qn = emac_txq_cnt;
277 client->rx_qn = EMAC_RXQ_CNT;
279 client->pfe = priv->pfe;
280 client->port_id = dev->data->port_id;
281 client->event_handler = pfe_eth_event_handler;
283 client->tx_qsize = EMAC_TXQ_DEPTH;
284 client->rx_qsize = EMAC_RXQ_DEPTH;
286 rc = hif_lib_client_register(client);
288 PFE_PMD_ERR("hif_lib_client_register(%d)"
289 " failed", client->id);
/* Stale client still marked live in shared memory: flush any mbufs
 * left in its RX queue before re-registering below.
 */
293 /* Freeing the packets if already exists */
295 struct rte_mbuf *rx_pkts[32];
296 /* TODO multiqueue support */
297 ret = hif_lib_receive_pkt(&client->rx_q[0],
298 hif_shm->pool, rx_pkts, 32);
301 for (i = 0; i < ret; i++)
302 rte_pktmbuf_free(rx_pkts[i]);
303 ret = hif_lib_receive_pkt(&client->rx_q[0],
309 /* Register client driver with HIF */
310 memset(client, 0, sizeof(*client));
311 client->id = PFE_CL_GEM0 + priv->id;
312 client->tx_qn = emac_txq_cnt;
313 client->rx_qn = EMAC_RXQ_CNT;
315 client->pfe = priv->pfe;
316 client->port_id = dev->data->port_id;
317 client->event_handler = pfe_eth_event_handler;
319 client->tx_qsize = EMAC_TXQ_DEPTH;
320 client->rx_qsize = EMAC_RXQ_DEPTH;
322 rc = hif_lib_client_register(client);
324 PFE_PMD_ERR("hif_lib_client_register(%d) failed",
329 rc = pfe_eth_start(priv);
330 dev->rx_pkt_burst = &pfe_recv_pkts;
331 dev->tx_pkt_burst = &pfe_xmit_pkts;
332 /* If no prefetch is configured. */
333 if (getenv("PFE_INTR_SUPPORT")) {
334 dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
335 PFE_PMD_INFO("PFE INTERRUPT Mode enabled");
/* Open the PFE character device used for link-status queries and store
 * the fd in priv->link_fd. Failure is non-fatal: link status simply
 * becomes unavailable and link_fd is set to the invalid-fd sentinel.
 */
344 pfe_eth_open_cdev(struct pfe_eth_priv_s *priv)
351 pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY);
352 if (pfe_cdev_fd < 0) {
353 PFE_PMD_WARN("Unable to open PFE device file (%s).\n",
355 PFE_PMD_WARN("Link status update will not be available.\n");
356 priv->link_fd = PFE_CDEV_INVALID_FD;
360 priv->link_fd = pfe_cdev_fd;
/* Close the link-status cdev fd if it was opened; idempotent thanks to
 * the PFE_CDEV_INVALID_FD sentinel.
 */
366 pfe_eth_close_cdev(struct pfe_eth_priv_s *priv)
371 if (priv->link_fd != PFE_CDEV_INVALID_FD) {
372 close(priv->link_fd);
373 priv->link_fd = PFE_CDEV_INVALID_FD;
/* dev_stop callback: disable MAC and GPI, then swap in the dummy burst
 * functions so in-flight polling threads see a harmless no-op datapath.
 */
378 pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/)
380 struct pfe_eth_priv_s *priv = dev->data->dev_private;
382 gemac_disable(priv->EMAC_baseaddr);
383 gpi_disable(priv->GPI_baseaddr);
385 dev->rx_pkt_burst = &pfe_dummy_recv_pkts;
386 dev->tx_pkt_burst = &pfe_dummy_xmit_pkts;
/* Tear down one ethdev port: close the link-status cdev, free the MAC
 * address array and release the ethdev port back to the framework.
 * NOTE(review): the `pfe` parameter's use is elided in this view.
 */
390 pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe)
392 PMD_INIT_FUNC_TRACE();
395 /* Close the device file for link status */
396 pfe_eth_close_cdev(dev->data->dev_private);
398 rte_free(dev->data->mac_addrs);
399 rte_eth_dev_release_port(dev);
/* dev_close callback: release this port and, when it was the last one,
 * tear down the shared HIF library state.
 */
404 pfe_eth_close(struct rte_eth_dev *dev)
412 pfe_eth_exit(dev, g_pfe);
414 if (g_pfe->nb_devs == 0) {
416 pfe_hif_lib_exit(g_pfe);
/* dev_configure callback — nothing to configure for this PMD. */
423 pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
/* dev_infos_get callback: report queue/MAC limits, offload capabilities
 * and frame-size limits. LS1012A rev1 silicon caps the MTU lower than
 * later revisions, which support jumbo frames.
 */
429 pfe_eth_info(struct rte_eth_dev *dev,
430 struct rte_eth_dev_info *dev_info)
432 struct pfe_eth_priv_s *internals = dev->data->dev_private;
434 dev_info->if_index = internals->id;
435 dev_info->max_mac_addrs = PFE_MAX_MACS;
436 dev_info->max_rx_queues = dev->data->nb_rx_queues;
437 dev_info->max_tx_queues = dev->data->nb_tx_queues;
438 dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
439 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
440 dev_info->rx_offload_capa = dev_rx_offloads_sup;
441 dev_info->tx_offload_capa = dev_tx_offloads_sup;
442 if (pfe_svr == SVR_LS1012A_REV1) {
443 dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
444 dev_info->max_mtu = MAX_MTU_ON_REV1;
446 dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
447 dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD;
453 /* Only first mb_pool given on first call of this API will be used
454 * in whole system, also nb_rx_desc and rx_conf are unused params
/* rx_queue_setup callback. On the first queue setup system-wide, the
 * shared HIF descriptor ring is initialized from the given mempool and
 * its buffers are pre-posted; subsequent calls only bind the client RX
 * queue to the ethdev queue slot.
 */
457 pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
458 __rte_unused uint16_t nb_rx_desc,
459 __rte_unused unsigned int socket_id,
460 __rte_unused const struct rte_eth_rxconf *rx_conf,
461 struct rte_mempool *mb_pool)
465 struct pfe_eth_priv_s *priv = dev->data->dev_private;
469 if (queue_idx >= EMAC_RXQ_CNT) {
470 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
471 queue_idx, EMAC_RXQ_CNT);
475 if (!pfe->hif.setuped) {
476 rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
478 PFE_PMD_ERR("Could not allocate buffer descriptors");
482 pfe->hif.shm->pool = mb_pool;
483 if (pfe_hif_init_buffers(&pfe->hif)) {
484 PFE_PMD_ERR("Could not initialize buffer descriptors");
/* Mark shared HIF as initialized so later queues skip the setup above. */
490 pfe->hif.setuped = 1;
492 dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
493 priv->client.rx_q[queue_idx].queue_id = queue_idx;
/* rx_queue_release callback — queues live in priv, nothing to free. */
499 pfe_rx_queue_release(void *q __rte_unused)
501 PMD_INIT_FUNC_TRACE();
/* tx_queue_release callback — queues live in priv, nothing to free. */
505 pfe_tx_queue_release(void *q __rte_unused)
507 PMD_INIT_FUNC_TRACE();
/* tx_queue_setup callback: validate the index against emac_txq_cnt and
 * bind the per-port client TX queue to the ethdev queue slot. Descriptor
 * count, socket and txconf are ignored (fixed HIF ring).
 */
511 pfe_tx_queue_setup(struct rte_eth_dev *dev,
513 __rte_unused uint16_t nb_desc,
514 __rte_unused unsigned int socket_id,
515 __rte_unused const struct rte_eth_txconf *tx_conf)
517 struct pfe_eth_priv_s *priv = dev->data->dev_private;
519 if (queue_idx >= emac_txq_cnt) {
520 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
521 queue_idx, emac_txq_cnt);
524 dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
525 priv->client.tx_q[queue_idx].queue_id = queue_idx;
/* dev_supported_ptypes_get callback: advertise the packet types the RX
 * burst functions can classify, but only when one of this PMD's real RX
 * handlers is installed (i.e. not the dummy/stopped path).
 */
529 static const uint32_t *
530 pfe_supported_ptypes_get(struct rte_eth_dev *dev)
532 static const uint32_t ptypes[] = {
533 /*todo -= add more types */
536 RTE_PTYPE_L3_IPV4_EXT,
538 RTE_PTYPE_L3_IPV6_EXT,
544 if (dev->rx_pkt_burst == pfe_recv_pkts ||
545 dev->rx_pkt_burst == pfe_recv_pkts_on_intr)
/* mtu_set callback: program the MAC's max RX frame size (MTU plus
 * Ethernet header and CRC) and record the new MTU in dev data.
 */
551 pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
554 struct pfe_eth_priv_s *priv = dev->data->dev_private;
555 uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
557 /*TODO Support VLAN*/
558 ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size);
560 dev->data->mtu = mtu;
565 /* pfe_eth_enet_addr_byte_mac
/* Pack a 6-byte MAC address into the hardware's bottom (first 4 bytes,
 * little-endian) / top (last 2 bytes) register layout. Rejects NULL
 * arguments.
 */
568 pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
569 struct pfe_mac_addr *enet_addr)
571 if (!enet_byte_addr || !enet_addr) {
575 enet_addr->bottom = enet_byte_addr[0] |
576 (enet_byte_addr[1] << 8) |
577 (enet_byte_addr[2] << 16) |
578 (enet_byte_addr[3] << 24);
579 enet_addr->top = enet_byte_addr[4] |
580 (enet_byte_addr[5] << 8);
/* mac_addr_set callback: convert the address to the GEMAC register
 * layout, program local-address slot 1, and mirror the address into
 * dev->data->mac_addrs[0].
 */
586 pfe_dev_set_mac_addr(struct rte_eth_dev *dev,
587 struct rte_ether_addr *addr)
589 struct pfe_eth_priv_s *priv = dev->data->dev_private;
590 struct pfe_mac_addr spec_addr;
593 ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr);
597 gemac_set_laddrN(priv->EMAC_baseaddr,
598 (struct pfe_mac_addr *)&spec_addr, 1);
599 rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
/* stats_get callback: copy the software-maintained per-port counters
 * (updated in the burst functions) into the caller's structure.
 */
604 pfe_stats_get(struct rte_eth_dev *dev,
605 struct rte_eth_stats *stats)
607 struct pfe_eth_priv_s *priv = dev->data->dev_private;
608 struct rte_eth_stats *eth_stats = &priv->stats;
613 memset(stats, 0, sizeof(struct rte_eth_stats));
615 stats->ipackets = eth_stats->ipackets;
616 stats->ibytes = eth_stats->ibytes;
617 stats->opackets = eth_stats->opackets;
618 stats->obytes = eth_stats->obytes;
/* ethdev operations table wired into every PFE port in pfe_eth_init(). */
623 static const struct eth_dev_ops ops = {
624 .dev_start = pfe_eth_open,
625 .dev_stop = pfe_eth_stop,
626 .dev_close = pfe_eth_close,
627 .dev_configure = pfe_eth_configure,
628 .dev_infos_get = pfe_eth_info,
629 .rx_queue_setup = pfe_rx_queue_setup,
630 .rx_queue_release = pfe_rx_queue_release,
631 .tx_queue_setup = pfe_tx_queue_setup,
632 .tx_queue_release = pfe_tx_queue_release,
633 .dev_supported_ptypes_get = pfe_supported_ptypes_get,
634 .mtu_set = pfe_mtu_set,
635 .mac_addr_set = pfe_dev_set_mac_addr,
636 .stats_get = pfe_stats_get,
/* Allocate and initialize one ethdev port for GEMAC `id`: bind platform
 * data and register bases, pick the TMU queue pair for the port, program
 * the station MAC address, put the port in stopped state, run the
 * one-time GEMAC init, open the link-status cdev (best effort) and
 * finish probing.
 * NOTE(review): error-branch bodies and the return path are elided in
 * this view; the trailing release_port is the failure cleanup.
 */
640 pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
642 struct rte_eth_dev *eth_dev = NULL;
643 struct pfe_eth_priv_s *priv = NULL;
644 struct ls1012a_eth_platform_data *einfo;
645 struct ls1012a_pfe_platform_data *pfe_info;
646 struct rte_ether_addr addr;
649 eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
653 /* Extract pltform data */
654 pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
656 PFE_PMD_ERR("pfe missing additional platform data");
661 einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata;
663 /* einfo never be NULL, but no harm in having this check */
665 PFE_PMD_ERR("pfe missing additional gemacs platform data");
670 priv = eth_dev->data->dev_private;
671 priv->ndev = eth_dev;
672 priv->id = einfo[id].gem_id;
675 pfe->eth.eth_priv[id] = priv;
677 /* Set the info in the priv to the current info */
678 priv->einfo = &einfo[id];
679 priv->EMAC_baseaddr = cbus_emac_base[id];
680 priv->PHY_baseaddr = cbus_emac_base[id];
681 priv->GPI_baseaddr = cbus_gpi_base[id];
/* Each GEMAC owns two consecutive TMU queues starting at base 6. */
683 #define HIF_GEMAC_TMUQ_BASE 6
684 priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
685 priv->high_tmu_q = priv->low_tmu_q + 1;
687 rte_spinlock_init(&priv->lock);
689 /* Copy the station address into the dev structure, */
690 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
691 ETHER_ADDR_LEN * PFE_MAX_MACS, 0);
692 if (eth_dev->data->mac_addrs == NULL) {
693 PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
694 ETHER_ADDR_LEN * PFE_MAX_MACS);
699 memcpy(addr.addr_bytes, priv->einfo->mac_addr,
702 pfe_dev_set_mac_addr(eth_dev, &addr);
703 rte_ether_addr_copy(&addr, &eth_dev->data->mac_addrs[0]);
705 eth_dev->data->mtu = 1500;
706 eth_dev->dev_ops = &ops;
707 pfe_eth_stop(eth_dev);
708 pfe_gemac_init(priv);
710 eth_dev->data->nb_rx_queues = 1;
711 eth_dev->data->nb_tx_queues = 1;
713 /* For link status, open the PFE CDEV; Error from this function
714 * is silently ignored; In case of error, the link status will not
717 pfe_eth_open_cdev(priv);
718 rte_eth_dev_probing_finish(eth_dev);
722 rte_eth_dev_release_port(eth_dev);
/* Walk the device-tree children of the PFE node to find the GEMAC whose
 * "reg" matches `port`, then fill pdata for that port: gem_id, MAC
 * address from DT, and the MDIO mux value ("fsl,mdio-mux-val"), also
 * recorded per-PHY in pfe->mdio_muxval when the phy_id is in range.
 * NOTE(review): error-branch bodies and the return are elided in this
 * view.
 */
727 pfe_get_gemac_if_proprties(struct pfe *pfe,
728 __rte_unused const struct device_node *parent,
729 unsigned int port, unsigned int if_cnt,
730 struct ls1012a_pfe_platform_data *pdata)
732 const struct device_node *gem = NULL;
734 unsigned int ii = 0, phy_id = 0;
736 const void *mac_addr;
738 for (ii = 0; ii < if_cnt; ii++) {
739 gem = of_get_next_child(parent, gem);
742 addr = of_get_property(gem, "reg", &size);
743 if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port))
748 PFE_PMD_ERR("Failed to find interface = %d", if_cnt);
752 pdata->ls1012a_eth_pdata[port].gem_id = port;
754 mac_addr = of_get_mac_address(gem);
757 memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
761 addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
763 PFE_PMD_ERR("Invalid mdio-mux-val....");
765 phy_id = rte_be_to_cpu_32((unsigned int)*addr);
766 pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
768 if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
769 pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
770 pdata->ls1012a_eth_pdata[port].mdio_muxval;
778 /* Parse integer from integer argument */
/* rte_kvargs callback for the "intf" vdev argument: parse a base-10
 * integer and accept only port IDs 0 and 1, storing the result through
 * extra_args.
 */
780 parse_integer_arg(const char *key __rte_unused,
781 const char *value, void *extra_args)
787 i = strtol(value, &end, 10);
788 if (*end != 0 || errno != 0 || i < 0 || i > 1) {
789 PFE_PMD_ERR("Supported Port IDS are 0 and 1");
793 *((uint32_t *)extra_args) = i;
/* Parse the vdev argument string into init_params using rte_kvargs;
 * currently only the "intf" (gem-id) key is recognized. The kvlist is
 * freed on exit.
 */
799 pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params,
800 struct rte_vdev_device *dev)
802 struct rte_kvargs *kvlist = NULL;
805 static const char * const pfe_vdev_valid_params[] = {
810 const char *input_args = rte_vdev_device_args(dev);
815 kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params);
819 ret = rte_kvargs_process(kvlist,
823 rte_kvargs_free(kvlist);
/* vdev probe entry point. First invocation performs the global bring-up:
 * parse vdev args, allocate g_pfe, locate the "fsl,pfe" device-tree
 * node, translate and mmap the CBUS register window via /dev/mem,
 * resolve the DDR region, read the interface count and per-GEMAC
 * platform data, initialize the PFE/HIF libraries and read the SoC
 * version. Every invocation then creates one ethdev port (gem-id from
 * the "intf" arg, or the next free id).
 * NOTE(review): many error-branch bodies, gotos and the return are
 * elided in this view; the trailing pfe_hif_lib_exit/munmap lines are
 * the failure cleanup path.
 */
828 pmd_pfe_probe(struct rte_vdev_device *vdev)
831 const struct device_node *np;
833 const uint32_t *addr;
834 uint64_t cbus_addr, ddr_size, cbus_size;
835 int rc = -1, fd = -1, gem_id;
836 unsigned int ii, interface_count = 0;
838 struct pfe_vdev_init_params init_params = {
842 name = rte_vdev_device_name(vdev);
843 rc = pfe_parse_vdev_init_params(&init_params, vdev);
847 RTE_LOG(INFO, PMD, "Initializing pmd_pfe for %s Given gem-id %d\n",
848 name, init_params.gem_id);
/* Global state already exists: only enforce the per-SoC port limit. */
851 if (g_pfe->nb_devs >= g_pfe->max_intf) {
852 PFE_PMD_ERR("PFE %d dev already created Max is %d",
853 g_pfe->nb_devs, g_pfe->max_intf);
859 g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE);
863 /* Load the device-tree driver */
866 PFE_PMD_ERR("of_init failed with ret: %d", rc);
870 np = of_find_compatible_node(NULL, NULL, "fsl,pfe");
872 PFE_PMD_ERR("Invalid device node");
/* Region 0 of the DT node is the CBUS (register) window. */
877 addr = of_get_address(np, 0, &cbus_size, NULL);
879 PFE_PMD_ERR("of_get_address cannot return qman address\n");
882 cbus_addr = of_translate_address(np, addr);
884 PFE_PMD_ERR("of_translate_address failed\n");
/* Region 1 is the PFE-owned DDR region. */
888 addr = of_get_address(np, 1, &ddr_size, NULL);
890 PFE_PMD_ERR("of_get_address cannot return qman address\n");
894 g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr);
895 if (!g_pfe->ddr_phys_baseaddr) {
896 PFE_PMD_ERR("of_translate_address failed\n");
900 g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr);
901 g_pfe->ddr_size = ddr_size;
902 g_pfe->cbus_size = cbus_size;
904 fd = open("/dev/mem", O_RDWR);
905 g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE,
906 MAP_SHARED, fd, cbus_addr);
908 if (g_pfe->cbus_baseaddr == MAP_FAILED) {
909 PFE_PMD_ERR("Can not map cbus base");
914 /* Read interface count */
915 prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
917 PFE_PMD_ERR("Failed to read number of interfaces");
922 interface_count = rte_be_to_cpu_32((unsigned int)*prop);
923 if (interface_count <= 0) {
924 PFE_PMD_ERR("No ethernet interface count : %d",
929 PFE_PMD_INFO("num interfaces = %d ", interface_count);
931 g_pfe->max_intf = interface_count;
932 g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
934 for (ii = 0; ii < interface_count; ii++) {
935 pfe_get_gemac_if_proprties(g_pfe, np, ii, interface_count,
936 &g_pfe->platform_data);
939 pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr,
940 g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size);
942 PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION));
943 PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION));
945 PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION));
946 PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION));
948 PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION));
949 PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION));
950 PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION));
952 PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION));
953 PFE_PMD_INFO("HIF NOPCY version: %x", readl(HIF_NOCPY_VERSION));
/* Record the two GEMAC/GPI register bases for pfe_eth_init(). */
955 cbus_emac_base[0] = EMAC1_BASE_ADDR;
956 cbus_emac_base[1] = EMAC2_BASE_ADDR;
958 cbus_gpi_base[0] = EGPI1_BASE_ADDR;
959 cbus_gpi_base[1] = EGPI2_BASE_ADDR;
961 rc = pfe_hif_lib_init(g_pfe);
965 rc = pfe_hif_init(g_pfe);
968 pfe_soc_version_get();
/* Without an explicit "intf" arg, assign the next unused gem-id. */
970 if (init_params.gem_id < 0)
971 gem_id = g_pfe->nb_devs;
973 gem_id = init_params.gem_id;
975 RTE_LOG(INFO, PMD, "Init pmd_pfe for %s gem-id %d(given =%d)\n",
976 name, gem_id, init_params.gem_id);
978 rc = pfe_eth_init(vdev, g_pfe, gem_id);
990 pfe_hif_lib_exit(g_pfe);
994 munmap(g_pfe->cbus_baseaddr, cbus_size);
/* vdev remove entry point: find the ethdev allocated under the vdev
 * name, tear the port down, unmap the CBUS window, and when the last
 * port is gone release the shared HIF state as well.
 */
1001 pmd_pfe_remove(struct rte_vdev_device *vdev)
1004 struct rte_eth_dev *eth_dev = NULL;
1006 name = rte_vdev_device_name(vdev);
1010 PFE_PMD_INFO("Closing eventdev sw device %s", name);
1015 eth_dev = rte_eth_dev_allocated(name);
1016 if (eth_dev == NULL)
1019 pfe_eth_exit(eth_dev, g_pfe);
1020 munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size);
1022 if (g_pfe->nb_devs == 0) {
1023 pfe_hif_exit(g_pfe);
1024 pfe_hif_lib_exit(g_pfe);
/* Virtual-device driver descriptor and registration with the vdev bus,
 * plus the documented "intf=<int>" device argument.
 */
1032 struct rte_vdev_driver pmd_pfe_drv = {
1033 .probe = pmd_pfe_probe,
1034 .remove = pmd_pfe_remove,
1037 RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv);
1038 RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int> ");
/* Constructor: register the PMD's log type and default it to NOTICE. */
1040 RTE_INIT(pfe_pmd_init_log)
1042 pfe_logtype_pmd = rte_log_register("pmd.net.pfe");
1043 if (pfe_logtype_pmd >= 0)
1044 rte_log_set_level(pfe_logtype_pmd, RTE_LOG_NOTICE);