1 /* SPDX-License-Identifier: BSD-3-Clause
5 #include <rte_kvargs.h>
6 #include <rte_ethdev_vdev.h>
7 #include <rte_bus_vdev.h>
13 #define PFE_MAX_MACS 1 /*we can support upto 4 MACs per IF*/
14 #define PFE_VDEV_GEM_ID_ARG "intf"
16 struct pfe_vdev_init_params {
/*
 * Module-scope state shared by all PFE vdev ports.
 * NOTE(review): the embedded source numbering is non-contiguous, so some
 * lines (e.g. the terminator of the TODO comment below) are missing from
 * this view of the file.
 */
19 static struct pfe *g_pfe;
20 /* Supported Rx offloads */
21 static uint64_t dev_rx_offloads_sup =
22 DEV_RX_OFFLOAD_IPV4_CKSUM |
23 DEV_RX_OFFLOAD_UDP_CKSUM |
24 DEV_RX_OFFLOAD_TCP_CKSUM;
26 /* Supported Tx offloads */
27 static uint64_t dev_tx_offloads_sup =
28 DEV_TX_OFFLOAD_IPV4_CKSUM |
29 DEV_TX_OFFLOAD_UDP_CKSUM |
30 DEV_TX_OFFLOAD_TCP_CKSUM;
32 /* TODO: make pfe_svr a runtime option.
33 * Driver should be able to get the SVR
34 * information from HW.
36 unsigned int pfe_svr = SVR_LS1012A_REV1;
/* Per-GEMAC register base pointers, filled in at probe time. */
37 static void *cbus_emac_base[3];
38 static void *cbus_gpi_base[3];
/*
 * pfe_gemac_init - apply the driver's fixed GEMAC defaults to one port:
 * 1 Gbps full duplex, broadcast accept, 1536-byte RX frames, stacked
 * VLAN, RX pause, 64-bit bus width and RX checksum offload.
 * NOTE(review): the declaration/zeroing of `cfg` is not visible in this
 * view — presumably a gemac config struct; confirm against full source.
 */
45 pfe_gemac_init(struct pfe_eth_priv_s *priv)
49 cfg.speed = SPEED_1000M;
50 cfg.duplex = DUPLEX_FULL;
52 gemac_set_config(priv->EMAC_baseaddr, &cfg);
53 gemac_allow_broadcast(priv->EMAC_baseaddr);
54 gemac_enable_1536_rx(priv->EMAC_baseaddr);
55 gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
56 gemac_enable_pause_rx(priv->EMAC_baseaddr);
57 gemac_set_bus_width(priv->EMAC_baseaddr, 64);
58 gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
/*
 * pfe_soc_version_get - read the SoC version (SVR) from PFE_SOC_ID_FILE
 * in "svr:%x" format into svr_ver; if the file cannot be opened the
 * routine returns silently (feature unsupported on this infra).
 * NOTE(review): the fclose() call and the store into the global pfe_svr
 * are not visible in this view — confirm the file is closed on all paths.
 */
64 pfe_soc_version_get(void)
66 FILE *svr_file = NULL;
67 unsigned int svr_ver = 0;
69 PMD_INIT_FUNC_TRACE();
71 svr_file = fopen(PFE_SOC_ID_FILE, "r");
73 PFE_PMD_ERR("Unable to open SoC device");
74 return; /* Not supported on this infra */
77 if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
80 PFE_PMD_ERR("Unable to read SoC device");
/* pfe_eth_start - enable the port's GPI block then its GEMAC to start traffic. */
85 static int pfe_eth_start(struct pfe_eth_priv_s *priv)
87 gpi_enable(priv->GPI_baseaddr);
88 gemac_enable(priv->EMAC_baseaddr);
/*
 * pfe_eth_flush_txQ - reclaim completed TX mbufs for one queue: pop each
 * finished buffer from the HIF client completion queue and free it.
 * from_tx and n_desc are unused in this implementation.
 */
94 pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
95 __rte_unused from_tx, __rte_unused int n_desc)
97 struct rte_mbuf *mbuf;
100 /* Clean HIF and client queue */
101 while ((mbuf = hif_lib_tx_get_next_complete(&priv->client,
107 rte_pktmbuf_free(mbuf);
/* pfe_eth_flush_tx - reclaim completed TX mbufs on every EMAC TX queue. */
114 pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
118 for (ii = 0; ii < emac_txq_cnt; ii++)
119 pfe_eth_flush_txQ(priv, ii, 0, 0);
/*
 * pfe_eth_event_handler - HIF client event callback. On TXDONE it
 * reclaims completed TX buffers and re-arms the TXDONE notification.
 * The EVENT_HIGH_RX_WM case label is visible but its handling is on
 * lines missing from this view.
 */
123 pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
125 struct pfe_eth_priv_s *priv = data;
128 case EVENT_TXDONE_IND:
129 pfe_eth_flush_tx(priv);
130 hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
132 case EVENT_HIGH_RX_WM:
/*
 * pfe_eth_open - .dev_start hook: register this port as a HIF client
 * (id PFE_CL_GEM0 + port id, emac_txq_cnt TX queues of depth
 * EMAC_TXQ_DEPTH, EMAC_RXQ_CNT RX queues of depth EMAC_RXQ_DEPTH) and
 * then enable the datapath via pfe_eth_start().
 * NOTE(review): the client-registration sequence appears twice — once
 * guarded by the g_client_status test_bit and once unconditionally; the
 * in-code TODO says the guarded copy should be removed once eth_close
 * gains proper cleanup.
 */
141 pfe_eth_open(struct rte_eth_dev *dev)
143 struct pfe_eth_priv_s *priv = dev->data->dev_private;
144 struct hif_client_s *client;
145 struct hif_shm *hif_shm;
148 /* Register client driver with HIF */
149 client = &priv->client;
152 hif_shm = client->pfe->hif.shm;
153 /* TODO please remove the below code of if block, once we add
154 * the proper cleanup in eth_close
156 if (!test_bit(PFE_CL_GEM0 + priv->id,
157 &hif_shm->g_client_status[0])) {
158 /* Register client driver with HIF */
159 memset(client, 0, sizeof(*client));
160 client->id = PFE_CL_GEM0 + priv->id;
161 client->tx_qn = emac_txq_cnt;
162 client->rx_qn = EMAC_RXQ_CNT;
164 client->pfe = priv->pfe;
165 client->port_id = dev->data->port_id;
166 client->event_handler = pfe_eth_event_handler;
168 client->tx_qsize = EMAC_TXQ_DEPTH;
169 client->rx_qsize = EMAC_RXQ_DEPTH;
171 rc = hif_lib_client_register(client);
173 PFE_PMD_ERR("hif_lib_client_register(%d)"
174 " failed", client->id);
179 /* Register client driver with HIF */
180 memset(client, 0, sizeof(*client));
181 client->id = PFE_CL_GEM0 + priv->id;
182 client->tx_qn = emac_txq_cnt;
183 client->rx_qn = EMAC_RXQ_CNT;
185 client->pfe = priv->pfe;
186 client->port_id = dev->data->port_id;
187 client->event_handler = pfe_eth_event_handler;
189 client->tx_qsize = EMAC_TXQ_DEPTH;
190 client->rx_qsize = EMAC_RXQ_DEPTH;
192 rc = hif_lib_client_register(client);
194 PFE_PMD_ERR("hif_lib_client_register(%d) failed",
199 rc = pfe_eth_start(priv);
/*
 * pfe_eth_open_cdev - open the PFE character device (read-only) used for
 * link-status updates. Failure is non-fatal: warnings are logged and
 * priv->link_fd is set to PFE_CDEV_INVALID_FD.
 */
206 pfe_eth_open_cdev(struct pfe_eth_priv_s *priv)
213 pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY);
214 if (pfe_cdev_fd < 0) {
215 PFE_PMD_WARN("Unable to open PFE device file (%s).\n",
217 PFE_PMD_WARN("Link status update will not be available.\n");
218 priv->link_fd = PFE_CDEV_INVALID_FD;
222 priv->link_fd = pfe_cdev_fd;
/* pfe_eth_close_cdev - close the link-status fd if open and mark it invalid. */
228 pfe_eth_close_cdev(struct pfe_eth_priv_s *priv)
233 if (priv->link_fd != PFE_CDEV_INVALID_FD) {
234 close(priv->link_fd);
235 priv->link_fd = PFE_CDEV_INVALID_FD;
/* pfe_eth_stop - .dev_stop hook: disable the GEMAC first, then its GPI block. */
240 pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/)
242 struct pfe_eth_priv_s *priv = dev->data->dev_private;
244 gemac_disable(priv->EMAC_baseaddr);
245 gpi_disable(priv->GPI_baseaddr);
/*
 * pfe_eth_exit - per-port teardown: close the link-status cdev, free the
 * MAC address array and release the ethdev port.
 * NOTE(review): how the `pfe` argument is used (e.g. an nb_devs
 * decrement) is not visible in this view — confirm on the full source.
 */
249 pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe)
251 PMD_INIT_FUNC_TRACE();
254 /* Close the device file for link status */
255 pfe_eth_close_cdev(dev->data->dev_private);
257 rte_free(dev->data->mac_addrs);
258 rte_eth_dev_release_port(dev);
/*
 * pfe_eth_close - .dev_close hook: tear down this port and, once no
 * devices remain (nb_devs == 0), shut down the shared HIF library.
 */
263 pfe_eth_close(struct rte_eth_dev *dev)
271 pfe_eth_exit(dev, g_pfe);
273 if (g_pfe->nb_devs == 0) {
275 pfe_hif_lib_exit(g_pfe);
/* pfe_eth_configure - .dev_configure hook; dev is unused (body not visible here). */
282 pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
/*
 * pfe_eth_info - .dev_infos_get hook: report queue counts, minimum RX
 * buffer size, supported RX/TX offload capabilities, and the maximum RX
 * packet length (restricted on LS1012A rev1, jumbo otherwise).
 */
288 pfe_eth_info(struct rte_eth_dev *dev,
289 struct rte_eth_dev_info *dev_info)
291 struct pfe_eth_priv_s *internals = dev->data->dev_private;
293 dev_info->if_index = internals->id;
294 dev_info->max_mac_addrs = PFE_MAX_MACS;
295 dev_info->max_rx_queues = dev->data->nb_rx_queues;
296 dev_info->max_tx_queues = dev->data->nb_tx_queues;
297 dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
298 dev_info->rx_offload_capa = dev_rx_offloads_sup;
299 dev_info->tx_offload_capa = dev_tx_offloads_sup;
300 if (pfe_svr == SVR_LS1012A_REV1)
301 dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
303 dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
/*
 * pfe_rx_queue_setup - .rx_queue_setup hook: validate the queue index,
 * perform one-time HIF shared-memory/buffer setup with the first mempool,
 * and point the ethdev RX queue at the HIF client's per-queue state.
 */
308 /* Only first mb_pool given on first call of this API will be used
309 * in whole system, also nb_rx_desc and rx_conf are unused params
312 pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
313 __rte_unused uint16_t nb_rx_desc,
314 __rte_unused unsigned int socket_id,
315 __rte_unused const struct rte_eth_rxconf *rx_conf,
316 struct rte_mempool *mb_pool)
320 struct pfe_eth_priv_s *priv = dev->data->dev_private;
/* Reject queue indexes beyond the fixed EMAC RX queue count. */
324 if (queue_idx >= EMAC_RXQ_CNT) {
325 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
326 queue_idx, EMAC_RXQ_CNT);
/* One-time setup: seed HIF shm with the first mempool and fill the RX
 * buffer descriptors; setuped latches so later queues skip this.
 */
330 if (!pfe->hif.setuped) {
331 rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
333 PFE_PMD_ERR("Could not allocate buffer descriptors");
337 pfe->hif.shm->pool = mb_pool;
338 if (pfe_hif_init_buffers(&pfe->hif)) {
339 PFE_PMD_ERR("Could not initialize buffer descriptors");
345 pfe->hif.setuped = 1;
347 dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
348 priv->client.rx_q[queue_idx].queue_id = queue_idx;
/* pfe_rx_queue_release - no per-queue resources to free; only traces entry. */
354 pfe_rx_queue_release(void *q __rte_unused)
356 PMD_INIT_FUNC_TRACE();
/* pfe_tx_queue_release - no per-queue resources to free; only traces entry. */
360 pfe_tx_queue_release(void *q __rte_unused)
362 PMD_INIT_FUNC_TRACE();
/*
 * pfe_tx_queue_setup - .tx_queue_setup hook: validate the queue index
 * against emac_txq_cnt and point the ethdev TX queue at the HIF client's
 * per-queue state. nb_desc, socket_id and tx_conf are unused.
 */
366 pfe_tx_queue_setup(struct rte_eth_dev *dev,
368 __rte_unused uint16_t nb_desc,
369 __rte_unused unsigned int socket_id,
370 __rte_unused const struct rte_eth_txconf *tx_conf)
372 struct pfe_eth_priv_s *priv = dev->data->dev_private;
374 if (queue_idx >= emac_txq_cnt) {
375 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
376 queue_idx, emac_txq_cnt);
379 dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
380 priv->client.tx_q[queue_idx].queue_id = queue_idx;
/* eth_dev_ops vtable wiring the PFE callbacks into the ethdev API. */
384 static const struct eth_dev_ops ops = {
385 .dev_start = pfe_eth_open,
386 .dev_stop = pfe_eth_stop,
387 .dev_close = pfe_eth_close,
388 .dev_configure = pfe_eth_configure,
389 .dev_infos_get = pfe_eth_info,
390 .rx_queue_setup = pfe_rx_queue_setup,
391 .rx_queue_release = pfe_rx_queue_release,
392 .tx_queue_setup = pfe_tx_queue_setup,
393 .tx_queue_release = pfe_tx_queue_release,
/*
 * pfe_eth_init - allocate and initialise one ethdev for GEMAC `id`:
 * pull platform data, wire up EMAC/PHY/GPI register bases and the TMU
 * queue pair, allocate MAC-address storage, quiesce and configure the
 * GEMAC, open the link-status cdev, and finish probing. The error path
 * releases the ethdev port.
 */
397 pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
399 struct rte_eth_dev *eth_dev = NULL;
400 struct pfe_eth_priv_s *priv = NULL;
401 struct ls1012a_eth_platform_data *einfo;
402 struct ls1012a_pfe_platform_data *pfe_info;
405 eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
409 /* Extract pltform data */
410 pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
412 PFE_PMD_ERR("pfe missing additional platform data");
417 einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata;
419 /* einfo never be NULL, but no harm in having this check */
421 PFE_PMD_ERR("pfe missing additional gemacs platform data");
426 priv = eth_dev->data->dev_private;
427 priv->ndev = eth_dev;
428 priv->id = einfo[id].gem_id;
431 pfe->eth.eth_priv[id] = priv;
433 /* Set the info in the priv to the current info */
434 priv->einfo = &einfo[id];
435 priv->EMAC_baseaddr = cbus_emac_base[id];
436 priv->PHY_baseaddr = cbus_emac_base[id];
437 priv->GPI_baseaddr = cbus_gpi_base[id];
/* TMU queue pair for this GEMAC: base 6, two consecutive queues per id. */
439 #define HIF_GEMAC_TMUQ_BASE 6
440 priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
441 priv->high_tmu_q = priv->low_tmu_q + 1;
443 rte_spinlock_init(&priv->lock);
445 /* Copy the station address into the dev structure, */
446 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
447 ETHER_ADDR_LEN * PFE_MAX_MACS, 0);
448 if (eth_dev->data->mac_addrs == NULL) {
449 PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
450 ETHER_ADDR_LEN * PFE_MAX_MACS);
455 eth_dev->data->mtu = 1500;
456 eth_dev->dev_ops = &ops;
/* Quiesce the MAC before applying the driver's default GEMAC config. */
457 pfe_eth_stop(eth_dev);
458 pfe_gemac_init(priv);
460 eth_dev->data->nb_rx_queues = 1;
461 eth_dev->data->nb_tx_queues = 1;
463 /* For link status, open the PFE CDEV; Error from this function
464 * is silently ignored; In case of error, the link status will not
467 pfe_eth_open_cdev(priv);
468 rte_eth_dev_probing_finish(eth_dev);
472 rte_eth_dev_release_port(eth_dev);
/*
 * pfe_get_gemac_if_proprties - walk the device-tree children of the PFE
 * node to find the GEMAC whose "reg" property matches `port`, then fill
 * pdata with its gem id, MAC address and "fsl,mdio-mux-val".
 * NOTE(review): the assignment of pdata's phy_id field is not visible in
 * this view, yet it is used to index mdio_muxval below — confirm ordering
 * against the full source. (Function name typo is part of the interface.)
 */
477 pfe_get_gemac_if_proprties(struct pfe *pfe,
478 __rte_unused const struct device_node *parent,
479 unsigned int port, unsigned int if_cnt,
480 struct ls1012a_pfe_platform_data *pdata)
482 const struct device_node *gem = NULL;
484 unsigned int ii = 0, phy_id = 0;
486 const void *mac_addr;
/* Scan children until the "reg" property matches the requested port. */
488 for (ii = 0; ii < if_cnt; ii++) {
489 gem = of_get_next_child(parent, gem);
492 addr = of_get_property(gem, "reg", &size);
493 if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port))
498 PFE_PMD_ERR("Failed to find interface = %d", if_cnt);
502 pdata->ls1012a_eth_pdata[port].gem_id = port;
504 mac_addr = of_get_mac_address(gem);
507 memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
511 addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
513 PFE_PMD_ERR("Invalid mdio-mux-val....");
515 phy_id = rte_be_to_cpu_32((unsigned int)*addr);
516 pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
518 if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
519 pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
520 pdata->ls1012a_eth_pdata[port].mdio_muxval;
528 /* Parse integer from integer argument */
/*
 * parse_integer_arg - rte_kvargs callback: parse `value` as a base-10
 * port id; only 0 and 1 are accepted, stored through extra_args.
 * NOTE(review): errno is tested after strtol but its reset to 0 before
 * the call is not visible in this view — confirm it is cleared first.
 */
530 parse_integer_arg(const char *key __rte_unused,
531 const char *value, void *extra_args)
537 i = strtol(value, &end, 10);
538 if (*end != 0 || errno != 0 || i < 0 || i > 1) {
539 PFE_PMD_ERR("Supported Port IDS are 0 and 1");
543 *((uint32_t *)extra_args) = i;
/*
 * pfe_parse_vdev_init_params - parse the vdev devargs string for the
 * "intf" (gem id) key via rte_kvargs and store the result in `params`.
 * The kvargs list is freed before returning.
 */
549 pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params,
550 struct rte_vdev_device *dev)
552 struct rte_kvargs *kvlist = NULL;
555 static const char * const pfe_vdev_valid_params[] = {
560 const char *input_args = rte_vdev_device_args(dev);
565 kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params);
569 ret = rte_kvargs_process(kvlist,
573 rte_kvargs_free(kvlist);
/*
 * pmd_pfe_probe - vdev probe entry point. On the first call it maps the
 * PFE CBUS register window and DDR region described by the "fsl,pfe"
 * device-tree node, reads the interface count, initialises the PFE
 * libraries and HIF; each call then creates one ethdev for the requested
 * (or next free) gem id.
 * NOTE(review): the fd from open("/dev/mem") is passed to mmap without a
 * visible validity check and no close() is visible in this view —
 * confirm both against the full source.
 */
578 pmd_pfe_probe(struct rte_vdev_device *vdev)
581 const struct device_node *np;
583 const uint32_t *addr;
584 uint64_t cbus_addr, ddr_size, cbus_size;
585 int rc = -1, fd = -1, gem_id;
586 unsigned int ii, interface_count = 0;
588 struct pfe_vdev_init_params init_params = {
592 name = rte_vdev_device_name(vdev);
593 rc = pfe_parse_vdev_init_params(&init_params, vdev);
597 RTE_LOG(INFO, PMD, "Initializing pmd_pfe for %s Given gem-id %d\n",
598 name, init_params.gem_id);
/* Later probes reuse the shared g_pfe state, bounded by max_intf. */
601 if (g_pfe->nb_devs >= g_pfe->max_intf) {
602 PFE_PMD_ERR("PFE %d dev already created Max is %d",
603 g_pfe->nb_devs, g_pfe->max_intf);
609 g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE);
613 /* Load the device-tree driver */
616 PFE_PMD_ERR("of_init failed with ret: %d", rc);
620 np = of_find_compatible_node(NULL, NULL, "fsl,pfe");
622 PFE_PMD_ERR("Invalid device node");
/* Address index 0 = CBUS register window, index 1 = PFE-owned DDR. */
627 addr = of_get_address(np, 0, &cbus_size, NULL);
629 PFE_PMD_ERR("of_get_address cannot return qman address\n");
632 cbus_addr = of_translate_address(np, addr);
634 PFE_PMD_ERR("of_translate_address failed\n");
638 addr = of_get_address(np, 1, &ddr_size, NULL);
640 PFE_PMD_ERR("of_get_address cannot return qman address\n");
644 g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr);
645 if (!g_pfe->ddr_phys_baseaddr) {
646 PFE_PMD_ERR("of_translate_address failed\n");
650 g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr);
651 g_pfe->ddr_size = ddr_size;
652 g_pfe->cbus_size = cbus_size;
654 fd = open("/dev/mem", O_RDWR);
655 g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE,
656 MAP_SHARED, fd, cbus_addr);
658 if (g_pfe->cbus_baseaddr == MAP_FAILED) {
659 PFE_PMD_ERR("Can not map cbus base");
664 /* Read interface count */
665 prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
667 PFE_PMD_ERR("Failed to read number of interfaces");
672 interface_count = rte_be_to_cpu_32((unsigned int)*prop);
673 if (interface_count <= 0) {
674 PFE_PMD_ERR("No ethernet interface count : %d",
679 PFE_PMD_INFO("num interfaces = %d ", interface_count);
681 g_pfe->max_intf = interface_count;
682 g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
684 for (ii = 0; ii < interface_count; ii++) {
685 pfe_get_gemac_if_proprties(g_pfe, np, ii, interface_count,
686 &g_pfe->platform_data);
689 pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr,
690 g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size);
692 PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION));
693 PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION));
695 PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION));
696 PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION));
698 PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION));
699 PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION));
700 PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION));
702 PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION));
703 PFE_PMD_INFO("HIF NOPCY version: %x", readl(HIF_NOCPY_VERSION));
/* Record per-GEMAC register bases consumed later by pfe_eth_init(). */
705 cbus_emac_base[0] = EMAC1_BASE_ADDR;
706 cbus_emac_base[1] = EMAC2_BASE_ADDR;
708 cbus_gpi_base[0] = EGPI1_BASE_ADDR;
709 cbus_gpi_base[1] = EGPI2_BASE_ADDR;
711 rc = pfe_hif_lib_init(g_pfe);
715 rc = pfe_hif_init(g_pfe);
718 pfe_soc_version_get();
/* Use the explicit gem id from devargs if given, else the next free slot. */
720 if (init_params.gem_id < 0)
721 gem_id = g_pfe->nb_devs;
723 gem_id = init_params.gem_id;
725 RTE_LOG(INFO, PMD, "Init pmd_pfe for %s gem-id %d(given =%d)\n",
726 name, gem_id, init_params.gem_id);
728 rc = pfe_eth_init(vdev, g_pfe, gem_id);
740 pfe_hif_lib_exit(g_pfe);
744 munmap(g_pfe->cbus_baseaddr, cbus_size);
/*
 * pmd_pfe_remove - vdev remove: look up the ethdev by name, tear down
 * the port, unmap the CBUS window and, once no devices remain, shut
 * down the HIF library.
 * NOTE(review): the info log says "eventdev" but this is an ethdev PMD —
 * looks like a copy-paste in the message text; fix would need a code
 * change, not a comment.
 */
751 pmd_pfe_remove(struct rte_vdev_device *vdev)
754 struct rte_eth_dev *eth_dev = NULL;
756 name = rte_vdev_device_name(vdev);
760 PFE_PMD_INFO("Closing eventdev sw device %s", name);
765 eth_dev = rte_eth_dev_allocated(name);
769 pfe_eth_exit(eth_dev, g_pfe);
770 munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size);
772 if (g_pfe->nb_devs == 0) {
774 pfe_hif_lib_exit(g_pfe);
/* Vdev driver registration: PFE_NAME_PMD with optional "intf=<gem id>" arg. */
782 struct rte_vdev_driver pmd_pfe_drv = {
783 .probe = pmd_pfe_probe,
784 .remove = pmd_pfe_remove,
787 RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv);
788 RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int> ");
/* Constructor: register the PMD log type "pmd.net.pfe" at default level NOTICE. */
790 RTE_INIT(pfe_pmd_init_log)
792 pfe_logtype_pmd = rte_log_register("pmd.net.pfe");
793 if (pfe_logtype_pmd >= 0)
794 rte_log_set_level(pfe_logtype_pmd, RTE_LOG_NOTICE);