ethdev: allow drivers to return error on close
[dpdk.git] / drivers/net/pfe/pfe_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2019 NXP
3  */
4
5 #include <sys/ioctl.h>
6 #include <sys/epoll.h>
7 #include <rte_kvargs.h>
8 #include <rte_ethdev_vdev.h>
9 #include <rte_bus_vdev.h>
10 #include <rte_ether.h>
11 #include <dpaa_of.h>
12
13 #include "pfe_logs.h"
14 #include "pfe_mod.h"
15
16 #define PFE_MAX_MACS 1 /* the GEMAC supports up to 4 MAC addresses per interface; only one is used here */
17 #define PFE_VDEV_GEM_ID_ARG     "intf"
18
19 struct pfe_vdev_init_params {
20         int8_t  gem_id;
21 };
22 static struct pfe *g_pfe;
23 /* Supported Rx offloads */
24 static uint64_t dev_rx_offloads_sup =
25                 DEV_RX_OFFLOAD_IPV4_CKSUM |
26                 DEV_RX_OFFLOAD_UDP_CKSUM |
27                 DEV_RX_OFFLOAD_TCP_CKSUM;
28
29 /* Supported Tx offloads */
30 static uint64_t dev_tx_offloads_sup =
31                 DEV_TX_OFFLOAD_IPV4_CKSUM |
32                 DEV_TX_OFFLOAD_UDP_CKSUM |
33                 DEV_TX_OFFLOAD_TCP_CKSUM;
34
35 /* TODO: make pfe_svr a runtime option.
36  * The driver should be able to read the SVR
37  * information directly from hardware.
38  */
39 unsigned int pfe_svr = SVR_LS1012A_REV1;
40 static void *cbus_emac_base[3];
41 static void *cbus_gpi_base[3];
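/*
 * Per-GEMAC register windows inside the mapped CBUS region.  Both arrays
 * are filled once in pmd_pfe_probe() (EMAC1/EMAC2 and EGPI1/EGPI2 base
 * addresses) and consumed by pfe_eth_init() when each port's private
 * data is set up.
 */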
42
43 /* pfe_gemac_init: apply the default GEMAC configuration (1 Gbps full duplex,
44  * broadcast, 1536-byte Rx, stacked VLAN, Rx pause, Rx checksum offload).  */
45 static int
46 pfe_gemac_init(struct pfe_eth_priv_s *priv)
47 {
48         struct gemac_cfg cfg;
49
50         cfg.speed = SPEED_1000M;
51         cfg.duplex = DUPLEX_FULL;
52
53         gemac_set_config(priv->EMAC_baseaddr, &cfg);
54         gemac_allow_broadcast(priv->EMAC_baseaddr);
55         gemac_enable_1536_rx(priv->EMAC_baseaddr);
56         gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
57         gemac_enable_pause_rx(priv->EMAC_baseaddr);
58         gemac_set_bus_width(priv->EMAC_baseaddr, 64);
59         gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
60
61         return 0;
62 }
63
64 static void
65 pfe_soc_version_get(void)
66 {
67         FILE *svr_file = NULL;
68         unsigned int svr_ver = 0;
69
70         PMD_INIT_FUNC_TRACE();
71
72         svr_file = fopen(PFE_SOC_ID_FILE, "r");
73         if (!svr_file) {
74                 PFE_PMD_ERR("Unable to open SoC device");
75                 return; /* Not supported on this infra */
76         }
77
78         if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
79                 pfe_svr = svr_ver;
80         else
81                 PFE_PMD_ERR("Unable to read SoC device");
82
83         fclose(svr_file);
84 }
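/*
 * The SVR value matters because LS1012A rev1 silicon limits the maximum
 * frame size: pfe_eth_info() reports MAX_MTU_ON_REV1 instead of
 * JUMBO_FRAME_SIZE when pfe_svr is SVR_LS1012A_REV1.  If the sysfs file
 * cannot be read, the rev1 default set above is kept.
 */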
85
86 static int pfe_eth_start(struct pfe_eth_priv_s *priv)
87 {
88         gpi_enable(priv->GPI_baseaddr);
89         gemac_enable(priv->EMAC_baseaddr);
90
91         return 0;
92 }
93
94 static void
95 pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
96                   __rte_unused from_tx, __rte_unused int n_desc)
97 {
98         struct rte_mbuf *mbuf;
99         unsigned int flags;
100
101         /* Clean HIF and client queue */
102         while ((mbuf = hif_lib_tx_get_next_complete(&priv->client,
103                                                    tx_q_num, &flags,
104                                                    HIF_TX_DESC_NT))) {
105                 if (mbuf) {
106                         mbuf->next = NULL;
107                         mbuf->nb_segs = 1;
108                         rte_pktmbuf_free(mbuf);
109                 }
110         }
111 }
112
113
114 static void
115 pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
116 {
117         unsigned int ii;
118
119         for (ii = 0; ii < emac_txq_cnt; ii++)
120                 pfe_eth_flush_txQ(priv, ii, 0, 0);
121 }
122
123 static int
124 pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
125 {
126         struct pfe_eth_priv_s *priv = data;
127
128         switch (event) {
129         case EVENT_TXDONE_IND:
130                 pfe_eth_flush_tx(priv);
131                 hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
132                 break;
133         case EVENT_HIGH_RX_WM:
134         default:
135                 break;
136         }
137
138         return 0;
139 }
140
141 static uint16_t
142 pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
143 {
144         struct hif_client_rx_queue *queue = rxq;
145         struct pfe_eth_priv_s *priv = queue->priv;
146         struct epoll_event epoll_ev;
147         uint64_t ticks = 1;  /* 1 msec */
148         int ret;
149         int have_something, work_done;
150
151 #define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)
152
153         /*TODO can we remove this cleanup from here?*/
154         pfe_tx_do_cleanup(priv->pfe);
155         have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
156         work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
157                         rx_pkts, nb_pkts);
158
159         if (!have_something || !work_done) {
160                 writel(RESET_STATUS, HIF_INT_SRC);
161                 writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
162                 ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
163                 if (ret < 0 && errno != EINTR)
164                         PFE_PMD_ERR("epoll_wait failed with errno %d\n", errno);
165         }
166
167         return work_done;
168 }
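/*
 * Interrupt-mode receive path, selected in pfe_eth_open() when the
 * PFE_INTR_SUPPORT environment variable is set.  When neither the HIF
 * nor the client queue produced any packets, the HIF Rx interrupt is
 * re-armed and the calling thread sleeps in epoll_wait() for up to one
 * millisecond instead of busy polling.
 */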
169
170 static uint16_t
171 pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
172 {
173         struct hif_client_rx_queue *queue = rxq;
174         struct pfe_eth_priv_s *priv = queue->priv;
175         struct rte_mempool *pool;
176
177         /*TODO can we remove this cleanup from here?*/
178         pfe_tx_do_cleanup(priv->pfe);
179         pfe_hif_rx_process(priv->pfe, nb_pkts);
180         pool = priv->pfe->hif.shm->pool;
181
182         return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
183 }
184
185 static uint16_t
186 pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
187 {
188         struct hif_client_tx_queue *queue = tx_queue;
189         struct pfe_eth_priv_s *priv = queue->priv;
190         struct rte_eth_stats *stats = &priv->stats;
191         int i;
192
193         for (i = 0; i < nb_pkts; i++) {
194                 if (tx_pkts[i]->nb_segs > 1) {
195                         struct rte_mbuf *mbuf;
196                         int j;
197
198                         hif_lib_xmit_pkt(&priv->client, queue->queue_id,
199                                 (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
200                                 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
201                                 tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
202                                 tx_pkts[i]);
203
204                         mbuf = tx_pkts[i]->next;
205                         for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
206                                 hif_lib_xmit_pkt(&priv->client, queue->queue_id,
207                                         (void *)(size_t)rte_pktmbuf_iova(mbuf),
208                                         mbuf->buf_addr + mbuf->data_off,
209                                         mbuf->data_len,
210                                         0x0, 0x0, mbuf);
211                                 mbuf = mbuf->next;
212                         }
213
214                         hif_lib_xmit_pkt(&priv->client, queue->queue_id,
215                                         (void *)(size_t)rte_pktmbuf_iova(mbuf),
216                                         mbuf->buf_addr + mbuf->data_off,
217                                         mbuf->data_len,
218                                         0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
219                                         mbuf);
220                 } else {
221                         hif_lib_xmit_pkt(&priv->client, queue->queue_id,
222                                 (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
223                                 tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
224                                 tx_pkts[i]->pkt_len, 0 /*ctrl*/,
225                                 HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
226                                 HIF_DATA_VALID,
227                                 tx_pkts[i]);
228                 }
229                 stats->obytes += tx_pkts[i]->pkt_len;
230                 hif_tx_dma_start();
231         }
232         stats->opackets += nb_pkts;
233         pfe_tx_do_cleanup(priv->pfe);
234
235         return nb_pkts;
236 }
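/*
 * Multi-segment transmit: the first segment is flagged HIF_FIRST_BUFFER,
 * intermediate segments carry no flags and the final segment is flagged
 * HIF_LAST_BUFFER | HIF_DATA_VALID so the HIF can delimit the frame.
 * Completed mbufs are reclaimed later through pfe_tx_do_cleanup() and
 * the EVENT_TXDONE_IND handler (pfe_eth_flush_tx).
 */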
237
238 static uint16_t
239 pfe_dummy_xmit_pkts(__rte_unused void *tx_queue,
240                 __rte_unused struct rte_mbuf **tx_pkts,
241                 __rte_unused uint16_t nb_pkts)
242 {
243         return 0;
244 }
245
246 static uint16_t
247 pfe_dummy_recv_pkts(__rte_unused void *rxq,
248                 __rte_unused struct rte_mbuf **rx_pkts,
249                 __rte_unused uint16_t nb_pkts)
250 {
251         return 0;
252 }
253
254 static int
255 pfe_eth_open(struct rte_eth_dev *dev)
256 {
257         struct pfe_eth_priv_s *priv = dev->data->dev_private;
258         struct hif_client_s *client;
259         struct hif_shm *hif_shm;
260         int rc;
261
262         /* Register client driver with HIF */
263         client = &priv->client;
264
265         if (client->pfe) {
266                 hif_shm = client->pfe->hif.shm;
267                 /* TODO: remove this if/else block once proper cleanup
268                  * is added in eth_close
269                  */
270                 if (!test_bit(PFE_CL_GEM0 + priv->id,
271                               &hif_shm->g_client_status[0])) {
272                         /* Register client driver with HIF */
273                         memset(client, 0, sizeof(*client));
274                         client->id = PFE_CL_GEM0 + priv->id;
275                         client->tx_qn = emac_txq_cnt;
276                         client->rx_qn = EMAC_RXQ_CNT;
277                         client->priv = priv;
278                         client->pfe = priv->pfe;
279                         client->port_id = dev->data->port_id;
280                         client->event_handler = pfe_eth_event_handler;
281
282                         client->tx_qsize = EMAC_TXQ_DEPTH;
283                         client->rx_qsize = EMAC_RXQ_DEPTH;
284
285                         rc = hif_lib_client_register(client);
286                         if (rc) {
287                                 PFE_PMD_ERR("hif_lib_client_register(%d)"
288                                             " failed", client->id);
289                                 goto err0;
290                         }
291                 } else {
292                         /* Free any packets still pending on the Rx queue */
293                         int ret = 0;
294                         struct rte_mbuf *rx_pkts[32];
295                         /* TODO multiqueue support */
296                         ret = hif_lib_receive_pkt(&client->rx_q[0],
297                                                   hif_shm->pool, rx_pkts, 32);
298                         while (ret) {
299                                 int i;
300                                 for (i = 0; i < ret; i++)
301                                         rte_pktmbuf_free(rx_pkts[i]);
302                                 ret = hif_lib_receive_pkt(&client->rx_q[0],
303                                                           hif_shm->pool,
304                                                           rx_pkts, 32);
305                         }
306                 }
307         } else {
308                 /* Register client driver with HIF */
309                 memset(client, 0, sizeof(*client));
310                 client->id = PFE_CL_GEM0 + priv->id;
311                 client->tx_qn = emac_txq_cnt;
312                 client->rx_qn = EMAC_RXQ_CNT;
313                 client->priv = priv;
314                 client->pfe = priv->pfe;
315                 client->port_id = dev->data->port_id;
316                 client->event_handler = pfe_eth_event_handler;
317
318                 client->tx_qsize = EMAC_TXQ_DEPTH;
319                 client->rx_qsize = EMAC_RXQ_DEPTH;
320
321                 rc = hif_lib_client_register(client);
322                 if (rc) {
323                         PFE_PMD_ERR("hif_lib_client_register(%d) failed",
324                                     client->id);
325                         goto err0;
326                 }
327         }
328         rc = pfe_eth_start(priv);
329         dev->rx_pkt_burst = &pfe_recv_pkts;
330         dev->tx_pkt_burst = &pfe_xmit_pkts;
331         /* Switch to interrupt-driven Rx if requested via the environment. */
332         if (getenv("PFE_INTR_SUPPORT")) {
333                 dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
334                 PFE_PMD_INFO("PFE INTERRUPT Mode enabled");
335         }
336
337
338 err0:
339         return rc;
340 }
341
342 static int
343 pfe_eth_open_cdev(struct pfe_eth_priv_s *priv)
344 {
345         int pfe_cdev_fd;
346
347         if (priv == NULL)
348                 return -1;
349
350         pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY);
351         if (pfe_cdev_fd < 0) {
352                 PFE_PMD_WARN("Unable to open PFE device file (%s).\n",
353                              PFE_CDEV_PATH);
354                 PFE_PMD_WARN("Link status update will not be available.\n");
355                 priv->link_fd = PFE_CDEV_INVALID_FD;
356                 return -1;
357         }
358
359         priv->link_fd = pfe_cdev_fd;
360
361         return 0;
362 }
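/*
 * The PFE character device is used only for link-status reporting:
 * pfe_eth_link_update() issues PFE_CDEV_ETH0/1_STATE_GET ioctls on
 * priv->link_fd.  If the device file cannot be opened, link_fd stays
 * PFE_CDEV_INVALID_FD and the link is simply reported as up.
 */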
363
364 static void
365 pfe_eth_close_cdev(struct pfe_eth_priv_s *priv)
366 {
367         if (priv == NULL)
368                 return;
369
370         if (priv->link_fd != PFE_CDEV_INVALID_FD) {
371                 close(priv->link_fd);
372                 priv->link_fd = PFE_CDEV_INVALID_FD;
373         }
374 }
375
376 static void
377 pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/)
378 {
379         struct pfe_eth_priv_s *priv = dev->data->dev_private;
380
381         gemac_disable(priv->EMAC_baseaddr);
382         gpi_disable(priv->GPI_baseaddr);
383
384         dev->rx_pkt_burst = &pfe_dummy_recv_pkts;
385         dev->tx_pkt_burst = &pfe_dummy_xmit_pkts;
386 }
387
388 static void
389 pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe)
390 {
391         PMD_INIT_FUNC_TRACE();
392
393         pfe_eth_stop(dev);
394         /* Close the device file for link status */
395         pfe_eth_close_cdev(dev->data->dev_private);
396
397         rte_eth_dev_release_port(dev);
398         pfe->nb_devs--;
399 }
400
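/*
 * dev_close callback.  It returns int rather than void, following the
 * "ethdev: allow drivers to return error on close" change this file was
 * updated for.  The last port to be closed also tears down the shared
 * HIF state and frees the global g_pfe context.
 */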
401 static int
402 pfe_eth_close(struct rte_eth_dev *dev)
403 {
404         if (!dev)
405                 return -1;
406
407         if (!g_pfe)
408                 return -1;
409
410         pfe_eth_exit(dev, g_pfe);
411
412         if (g_pfe->nb_devs == 0) {
413                 pfe_hif_exit(g_pfe);
414                 pfe_hif_lib_exit(g_pfe);
415                 rte_free(g_pfe);
416                 g_pfe = NULL;
417         }
418
419         return 0;
420 }
421
422 static int
423 pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
424 {
425         return 0;
426 }
427
428 static int
429 pfe_eth_info(struct rte_eth_dev *dev,
430                 struct rte_eth_dev_info *dev_info)
431 {
432         dev_info->max_mac_addrs = PFE_MAX_MACS;
433         dev_info->max_rx_queues = dev->data->nb_rx_queues;
434         dev_info->max_tx_queues = dev->data->nb_tx_queues;
435         dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
436         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
437         dev_info->rx_offload_capa = dev_rx_offloads_sup;
438         dev_info->tx_offload_capa = dev_tx_offloads_sup;
439         if (pfe_svr == SVR_LS1012A_REV1) {
440                 dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
441                 dev_info->max_mtu = MAX_MTU_ON_REV1;
442         } else {
443                 dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
444                 dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD;
445         }
446
447         return 0;
448 }
449
450 /* Only the mb_pool passed on the first call of this API is used for the
451  * whole system; nb_rx_desc and rx_conf are ignored.
452  */
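/*
 * The HIF itself is initialised lazily here, on the very first Rx queue
 * setup across all ports: the shared buffer descriptors are filled from
 * that first mempool and Rx/Tx DMA are enabled exactly once
 * (pfe->hif.setuped).  Later queues and ports only attach their client
 * Rx queue pointer.
 */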
453 static int
454 pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
455                 __rte_unused uint16_t nb_rx_desc,
456                 __rte_unused unsigned int socket_id,
457                 __rte_unused const struct rte_eth_rxconf *rx_conf,
458                 struct rte_mempool *mb_pool)
459 {
460         int rc = 0;
461         struct pfe *pfe;
462         struct pfe_eth_priv_s *priv = dev->data->dev_private;
463
464         pfe = priv->pfe;
465
466         if (queue_idx >= EMAC_RXQ_CNT) {
467                 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
468                                 queue_idx, EMAC_RXQ_CNT);
469                 return -1;
470         }
471
472         if (!pfe->hif.setuped) {
473                 rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
474                 if (rc) {
475                         PFE_PMD_ERR("Could not allocate buffer descriptors");
476                         return -1;
477                 }
478
479                 pfe->hif.shm->pool = mb_pool;
480                 if (pfe_hif_init_buffers(&pfe->hif)) {
481                         PFE_PMD_ERR("Could not initialize buffer descriptors");
482                         return -1;
483                 }
484                 hif_init();
485                 hif_rx_enable();
486                 hif_tx_enable();
487                 pfe->hif.setuped = 1;
488         }
489         dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
490         priv->client.rx_q[queue_idx].queue_id = queue_idx;
491
492         return 0;
493 }
494
495 static void
496 pfe_rx_queue_release(void *q __rte_unused)
497 {
498         PMD_INIT_FUNC_TRACE();
499 }
500
501 static void
502 pfe_tx_queue_release(void *q __rte_unused)
503 {
504         PMD_INIT_FUNC_TRACE();
505 }
506
507 static int
508 pfe_tx_queue_setup(struct rte_eth_dev *dev,
509                    uint16_t queue_idx,
510                    __rte_unused uint16_t nb_desc,
511                    __rte_unused unsigned int socket_id,
512                    __rte_unused const struct rte_eth_txconf *tx_conf)
513 {
514         struct pfe_eth_priv_s *priv = dev->data->dev_private;
515
516         if (queue_idx >= emac_txq_cnt) {
517                 PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
518                                 queue_idx, emac_txq_cnt);
519                 return -1;
520         }
521         dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
522         priv->client.tx_q[queue_idx].queue_id = queue_idx;
523         return 0;
524 }
525
526 static const uint32_t *
527 pfe_supported_ptypes_get(struct rte_eth_dev *dev)
528 {
529         static const uint32_t ptypes[] = {
530                 /* TODO: add more packet types */
531                 RTE_PTYPE_L2_ETHER,
532                 RTE_PTYPE_L3_IPV4,
533                 RTE_PTYPE_L3_IPV4_EXT,
534                 RTE_PTYPE_L3_IPV6,
535                 RTE_PTYPE_L3_IPV6_EXT,
536                 RTE_PTYPE_L4_TCP,
537                 RTE_PTYPE_L4_UDP,
538                 RTE_PTYPE_L4_SCTP
539         };
540
541         if (dev->rx_pkt_burst == pfe_recv_pkts ||
542                         dev->rx_pkt_burst == pfe_recv_pkts_on_intr)
543                 return ptypes;
544         return NULL;
545 }
546
547 static inline int
548 pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev,
549                                 struct rte_eth_link *link)
550 {
551         struct rte_eth_link *dst = link;
552         struct rte_eth_link *src = &dev->data->dev_link;
553
554         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
555                                 *(uint64_t *)src) == 0)
556                 return -1;
557
558         return 0;
559 }
560
561 static inline int
562 pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev,
563                                  struct rte_eth_link *link)
564 {
565         struct rte_eth_link *dst = &dev->data->dev_link;
566         struct rte_eth_link *src = link;
567
568         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
569                                 *(uint64_t *)src) == 0)
570                 return -1;
571
572         return 0;
573 }
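/*
 * These helpers update struct rte_eth_link with a single 64-bit
 * compare-and-set, relying on the link structure fitting in 64 bits;
 * the ethdev helpers rte_eth_linkstatus_get()/rte_eth_linkstatus_set()
 * provide equivalent functionality.
 */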
574
575 static int
576 pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
577 {
578         int ret, ioctl_cmd = 0;
579         struct pfe_eth_priv_s *priv;
580         struct rte_eth_link link, old;
581         unsigned int lstatus = 1;
582
583         if (dev == NULL) {
584                 PFE_PMD_ERR("Invalid device in link_update.\n");
585                 return 0;
586         }
587         priv = dev->data->dev_private;
588         memset(&old, 0, sizeof(old));
589         memset(&link, 0, sizeof(struct rte_eth_link));
590
591         pfe_eth_atomic_read_link_status(dev, &old);
592
593         /* Read from PFE CDEV, status of link, if file was successfully
594          * opened.
595          */
596         if (priv->link_fd != PFE_CDEV_INVALID_FD) {
597                 if (priv->id == 0)
598                         ioctl_cmd = PFE_CDEV_ETH0_STATE_GET;
599                 if (priv->id == 1)
600                         ioctl_cmd = PFE_CDEV_ETH1_STATE_GET;
601
602                 ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus);
603                 if (ret != 0) {
604                         PFE_PMD_ERR("Unable to fetch link status (ioctl)\n");
605                         /* use dummy link value */
606                         link.link_status = 1;
607                 }
608                 PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n",
609                               lstatus, priv->id);
610         }
611
612         if (old.link_status == lstatus) {
613                 /* no change in status */
614                 PFE_PMD_DEBUG("No change in link status; Not updating.\n");
615                 return -1;
616         }
617
618         link.link_status = lstatus;
619         link.link_speed = ETH_SPEED_NUM_1G; /* numeric speed, not the ETH_LINK_SPEED_* flag */
620         link.link_duplex = ETH_LINK_FULL_DUPLEX;
621         link.link_autoneg = ETH_LINK_AUTONEG;
622
623         pfe_eth_atomic_write_link_status(dev, &link);
624
625         PFE_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
626                      link.link_status ? "up" : "down");
627
628         return 0;
629 }
630
631 static int
632 pfe_promiscuous_enable(struct rte_eth_dev *dev)
633 {
634         struct pfe_eth_priv_s *priv = dev->data->dev_private;
635
636         priv->promisc = 1;
637         dev->data->promiscuous = 1;
638         gemac_enable_copy_all(priv->EMAC_baseaddr);
639
640         return 0;
641 }
642
643 static int
644 pfe_promiscuous_disable(struct rte_eth_dev *dev)
645 {
646         struct pfe_eth_priv_s *priv = dev->data->dev_private;
647
648         priv->promisc = 0;
649         dev->data->promiscuous = 0;
650         gemac_disable_copy_all(priv->EMAC_baseaddr);
651
652         return 0;
653 }
654
655 static int
656 pfe_allmulticast_enable(struct rte_eth_dev *dev)
657 {
658         struct pfe_eth_priv_s *priv = dev->data->dev_private;
659         struct pfe_mac_addr    hash_addr; /* hash register structure */
660
661         /* Set the hash to receive all multicast frames */
662         hash_addr.bottom = 0xFFFFFFFF;
663         hash_addr.top = 0xFFFFFFFF;
664         gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
665         dev->data->all_multicast = 1;
666
667         return 0;
668 }
669
670 static int
671 pfe_link_down(struct rte_eth_dev *dev)
672 {
673         pfe_eth_stop(dev);
674         return 0;
675 }
676
677 static int
678 pfe_link_up(struct rte_eth_dev *dev)
679 {
680         struct pfe_eth_priv_s *priv = dev->data->dev_private;
681
682         pfe_eth_start(priv);
683         return 0;
684 }
685
686 static int
687 pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
688 {
689         int ret;
690         struct pfe_eth_priv_s *priv = dev->data->dev_private;
691         uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
692
693         /*TODO Support VLAN*/
694         ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size);
695         if (!ret)
696                 dev->data->mtu = mtu;
697
698         return ret;
699 }
700
701 /* pfe_eth_enet_addr_byte_mac: pack a 6-byte MAC address into the bottom
702  * (bytes 0-3) and top (bytes 4-5) words used by the GEMAC registers.  */
703 static int
704 pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
705                            struct pfe_mac_addr *enet_addr)
706 {
707         if (!enet_byte_addr || !enet_addr) {
708                 return -1;
709
710         } else {
711                 enet_addr->bottom = enet_byte_addr[0] |
712                         (enet_byte_addr[1] << 8) |
713                         (enet_byte_addr[2] << 16) |
714                         (enet_byte_addr[3] << 24);
715                 enet_addr->top = enet_byte_addr[4] |
716                         (enet_byte_addr[5] << 8);
717                 return 0;
718         }
719 }
720
721 static int
722 pfe_dev_set_mac_addr(struct rte_eth_dev *dev,
723                        struct rte_ether_addr *addr)
724 {
725         struct pfe_eth_priv_s *priv = dev->data->dev_private;
726         struct pfe_mac_addr spec_addr;
727         int ret;
728
729         ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr);
730         if (ret)
731                 return ret;
732
733         gemac_set_laddrN(priv->EMAC_baseaddr,
734                          (struct pfe_mac_addr *)&spec_addr, 1);
735         rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
736         return 0;
737 }
738
739 static int
740 pfe_stats_get(struct rte_eth_dev *dev,
741               struct rte_eth_stats *stats)
742 {
743         struct pfe_eth_priv_s *priv = dev->data->dev_private;
744         struct rte_eth_stats *eth_stats = &priv->stats;
745
746         if (stats == NULL)
747                 return -1;
748
749         memset(stats, 0, sizeof(struct rte_eth_stats));
750
751         stats->ipackets = eth_stats->ipackets;
752         stats->ibytes = eth_stats->ibytes;
753         stats->opackets = eth_stats->opackets;
754         stats->obytes = eth_stats->obytes;
755
756         return 0;
757 }
758
759 static const struct eth_dev_ops ops = {
760         .dev_start = pfe_eth_open,
761         .dev_stop = pfe_eth_stop,
762         .dev_close = pfe_eth_close,
763         .dev_configure = pfe_eth_configure,
764         .dev_infos_get = pfe_eth_info,
765         .rx_queue_setup = pfe_rx_queue_setup,
766         .rx_queue_release  = pfe_rx_queue_release,
767         .tx_queue_setup = pfe_tx_queue_setup,
768         .tx_queue_release  = pfe_tx_queue_release,
769         .dev_supported_ptypes_get = pfe_supported_ptypes_get,
770         .link_update  = pfe_eth_link_update,
771         .promiscuous_enable   = pfe_promiscuous_enable,
772         .promiscuous_disable  = pfe_promiscuous_disable,
773         .allmulticast_enable  = pfe_allmulticast_enable,
774         .dev_set_link_down    = pfe_link_down,
775         .dev_set_link_up      = pfe_link_up,
776         .mtu_set              = pfe_mtu_set,
777         .mac_addr_set         = pfe_dev_set_mac_addr,
778         .stats_get            = pfe_stats_get,
779 };
780
781 static int
782 pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
783 {
784         struct rte_eth_dev *eth_dev = NULL;
785         struct pfe_eth_priv_s *priv = NULL;
786         struct ls1012a_eth_platform_data *einfo;
787         struct ls1012a_pfe_platform_data *pfe_info;
788         struct rte_ether_addr addr;
789         int err;
790
791         eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
792         if (eth_dev == NULL)
793                 return -ENOMEM;
794
795         /* Extract platform data */
796         pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
797         if (!pfe_info) {
798                 PFE_PMD_ERR("pfe missing additional platform data");
799                 err = -ENODEV;
800                 goto err0;
801         }
802
803         einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata;
804
805         /* einfo should never be NULL, but there is no harm in checking */
806         if (!einfo) {
807                 PFE_PMD_ERR("pfe missing additional gemacs platform data");
808                 err = -ENODEV;
809                 goto err0;
810         }
811
812         priv = eth_dev->data->dev_private;
813         priv->ndev = eth_dev;
814         priv->id = einfo[id].gem_id;
815         priv->pfe = pfe;
816
817         pfe->eth.eth_priv[id] = priv;
818
819         /* Set the info in the priv to the current info */
820         priv->einfo = &einfo[id];
821         priv->EMAC_baseaddr = cbus_emac_base[id];
822         priv->PHY_baseaddr = cbus_emac_base[id];
823         priv->GPI_baseaddr = cbus_gpi_base[id];
824
825 #define HIF_GEMAC_TMUQ_BASE     6
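        /*
         * Each GEMAC owns a pair of TMU queues starting at this base:
         * GEM0 uses queues 6/7 and GEM1 uses queues 8/9 (low and high
         * priority, as the field names suggest).
         */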
826         priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
827         priv->high_tmu_q = priv->low_tmu_q + 1;
828
829         rte_spinlock_init(&priv->lock);
830
831         /* Copy the station address into the device structure. */
832         eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
833                         ETHER_ADDR_LEN * PFE_MAX_MACS, 0);
834         if (eth_dev->data->mac_addrs == NULL) {
835                 PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
836                         ETHER_ADDR_LEN * PFE_MAX_MACS);
837                 err = -ENOMEM;
838                 goto err0;
839         }
840
841         memcpy(addr.addr_bytes, priv->einfo->mac_addr,
842                        ETH_ALEN);
843
844         pfe_dev_set_mac_addr(eth_dev, &addr);
845         rte_ether_addr_copy(&addr, &eth_dev->data->mac_addrs[0]);
846
847         eth_dev->data->mtu = 1500;
848         eth_dev->dev_ops = &ops;
849         pfe_eth_stop(eth_dev);
850         pfe_gemac_init(priv);
851
852         eth_dev->data->nb_rx_queues = 1;
853         eth_dev->data->nb_tx_queues = 1;
854
855         /* For link status, open the PFE CDEV; Error from this function
856          * is silently ignored; In case of error, the link status will not
857          * be available.
858          */
859         pfe_eth_open_cdev(priv);
860         rte_eth_dev_probing_finish(eth_dev);
861
862         return 0;
863 err0:
864         rte_eth_dev_release_port(eth_dev);
865         return err;
866 }
867
868 static int
869 pfe_get_gemac_if_properties(struct pfe *pfe,
870                 __rte_unused const struct device_node *parent,
871                 unsigned int port, unsigned int if_cnt,
872                 struct ls1012a_pfe_platform_data *pdata)
873 {
874         const struct device_node *gem = NULL;
875         size_t size;
876         unsigned int ii = 0, phy_id = 0;
877         const u32 *addr;
878         const void *mac_addr;
879
880         for (ii = 0; ii < if_cnt; ii++) {
881                 gem = of_get_next_child(parent, gem);
882                 if (!gem)
883                         goto err;
884                 addr = of_get_property(gem, "reg", &size);
885                 if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port))
886                         break;
887         }
888
889         if (ii >= if_cnt) {
890                 PFE_PMD_ERR("Failed to find port %u among %u interfaces", port, if_cnt);
891                 goto err;
892         }
893
894         pdata->ls1012a_eth_pdata[port].gem_id = port;
895
896         mac_addr = of_get_mac_address(gem);
897
898         if (mac_addr) {
899                 memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
900                        ETH_ALEN);
901         }
902
903         addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
904         if (!addr) {
905                 PFE_PMD_ERR("Missing fsl,mdio-mux-val property");
906         } else {
907                 phy_id = rte_be_to_cpu_32((unsigned int)*addr);
908                 pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
909         }
910         if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
911                 pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
912                          pdata->ls1012a_eth_pdata[port].mdio_muxval;
913
914         return 0;
915
916 err:
917         return -1;
918 }
919
920 /* Parse the integer "intf" device argument */
921 static int
922 parse_integer_arg(const char *key __rte_unused,
923                 const char *value, void *extra_args)
924 {
925         int i;
926         char *end;
927         errno = 0;
928
929         i = strtol(value, &end, 10);
930         if (*end != 0 || errno != 0 || i < 0 || i > 1) {
931                 PFE_PMD_ERR("Supported port IDs are 0 and 1");
932                 return -EINVAL;
933         }
934
935         *((int8_t *)extra_args) = (int8_t)i; /* extra_args points to the int8_t gem_id */
936
937         return 0;
938 }
939
940 static int
941 pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params,
942                            struct rte_vdev_device *dev)
943 {
944         struct rte_kvargs *kvlist = NULL;
945         int ret = 0;
946
947         static const char * const pfe_vdev_valid_params[] = {
948                 PFE_VDEV_GEM_ID_ARG,
949                 NULL
950         };
951
952         const char *input_args = rte_vdev_device_args(dev);
953
954         if (!input_args)
955                 return -1;
956
957         kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params);
958         if (kvlist == NULL)
959                 return -1;
960
961         ret = rte_kvargs_process(kvlist,
962                                 PFE_VDEV_GEM_ID_ARG,
963                                 &parse_integer_arg,
964                                 &params->gem_id);
965         rte_kvargs_free(kvlist);
966         return ret;
967 }
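/*
 * "intf" is the only device argument and selects the GEMAC to bind
 * (0 or 1).  Assuming PFE_NAME_PMD expands to something like "net_pfe",
 * a typical EAL invocation would look roughly like:
 *
 *     --vdev=net_pfe0,intf=0 --vdev=net_pfe1,intf=1
 *
 * The exact vdev name is whatever PFE_NAME_PMD defines; the command
 * above is only illustrative.
 */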
968
969 static int
970 pmd_pfe_probe(struct rte_vdev_device *vdev)
971 {
972         const u32 *prop;
973         const struct device_node *np;
974         const char *name;
975         const uint32_t *addr;
976         uint64_t cbus_addr, ddr_size, cbus_size;
977         int rc = -1, fd = -1, gem_id;
978         unsigned int ii, interface_count = 0;
979         size_t size = 0;
980         struct pfe_vdev_init_params init_params = {
981                 .gem_id = -1
982         };
983
984         name = rte_vdev_device_name(vdev);
985         rc = pfe_parse_vdev_init_params(&init_params, vdev);
986         if (rc < 0)
987                 return -EINVAL;
988
989         PFE_PMD_LOG(INFO, "Initializing pmd_pfe for %s, given gem-id %d",
990                 name, init_params.gem_id);
991
992         if (g_pfe) {
993                 if (g_pfe->nb_devs >= g_pfe->max_intf) {
994                         PFE_PMD_ERR("%d PFE devices already created, max is %d",
995                                 g_pfe->nb_devs, g_pfe->max_intf);
996                         return -EINVAL;
997                 }
998                 goto eth_init;
999         }
1000
1001         g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE);
1002         if (g_pfe == NULL)
1003                 return  -EINVAL;
1004
1005         /* Load the device-tree driver */
1006         rc = of_init();
1007         if (rc) {
1008                 PFE_PMD_ERR("of_init failed with ret: %d", rc);
1009                 goto err;
1010         }
1011
1012         np = of_find_compatible_node(NULL, NULL, "fsl,pfe");
1013         if (!np) {
1014                 PFE_PMD_ERR("fsl,pfe device node not found");
1015                 rc = -EINVAL;
1016                 goto err;
1017         }
1018
1019         addr = of_get_address(np, 0, &cbus_size, NULL);
1020         if (!addr) {
1021                 PFE_PMD_ERR("of_get_address failed for PFE cbus\n");
1022                 goto err;
1023         }
1024         cbus_addr = of_translate_address(np, addr);
1025         if (!cbus_addr) {
1026                 PFE_PMD_ERR("of_translate_address failed\n");
1027                 goto err;
1028         }
1029
1030         addr = of_get_address(np, 1, &ddr_size, NULL);
1031         if (!addr) {
1032                 PFE_PMD_ERR("of_get_address failed for PFE DDR\n");
1033                 goto err;
1034         }
1035
1036         g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr);
1037         if (!g_pfe->ddr_phys_baseaddr) {
1038                 PFE_PMD_ERR("of_translate_address failed\n");
1039                 goto err;
1040         }
1041
1042         g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr);
1043         g_pfe->ddr_size = ddr_size;
1044         g_pfe->cbus_size = cbus_size;
1045
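        /*
         * Map the CBUS register window through /dev/mem.  If the open()
         * fails, fd is -1 and the mmap() below returns MAP_FAILED, which
         * the following check turns into -EINVAL.
         */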
1046         fd = open("/dev/mem", O_RDWR);
1047         g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE,
1048                                         MAP_SHARED, fd, cbus_addr);
1049         close(fd);
1050         if (g_pfe->cbus_baseaddr == MAP_FAILED) {
1051                 PFE_PMD_ERR("Can not map cbus base");
1052                 rc = -EINVAL;
1053                 goto err;
1054         }
1055
1056         /* Read interface count */
1057         prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
1058         if (!prop) {
1059                 PFE_PMD_ERR("Failed to read number of interfaces");
1060                 rc = -ENXIO;
1061                 goto err_prop;
1062         }
1063
1064         interface_count = rte_be_to_cpu_32((unsigned int)*prop);
1065         if (interface_count == 0) {
1066                 PFE_PMD_ERR("Invalid ethernet interface count: %d",
1067                                 interface_count);
1068                 rc = -ENXIO;
1069                 goto err_prop;
1070         }
1071         PFE_PMD_INFO("num interfaces = %d", interface_count);
1072
1073         g_pfe->max_intf  = interface_count;
1074         g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
1075
1076         for (ii = 0; ii < interface_count; ii++) {
1077                 pfe_get_gemac_if_properties(g_pfe, np, ii, interface_count,
1078                                            &g_pfe->platform_data);
1079         }
1080
1081         pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr,
1082                      g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size);
1083
1084         PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION));
1085         PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION));
1086
1087         PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION));
1088         PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION));
1089
1090         PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION));
1091         PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION));
1092         PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION));
1093
1094         PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION));
1095         PFE_PMD_INFO("HIF NOCPY version: %x", readl(HIF_NOCPY_VERSION));
1096
1097         cbus_emac_base[0] = EMAC1_BASE_ADDR;
1098         cbus_emac_base[1] = EMAC2_BASE_ADDR;
1099
1100         cbus_gpi_base[0] = EGPI1_BASE_ADDR;
1101         cbus_gpi_base[1] = EGPI2_BASE_ADDR;
1102
1103         rc = pfe_hif_lib_init(g_pfe);
1104         if (rc < 0)
1105                 goto err_hif_lib;
1106
1107         rc = pfe_hif_init(g_pfe);
1108         if (rc < 0)
1109                 goto err_hif;
1110         pfe_soc_version_get();
1111 eth_init:
1112         if (init_params.gem_id < 0)
1113                 gem_id = g_pfe->nb_devs;
1114         else
1115                 gem_id = init_params.gem_id;
1116
1117         PFE_PMD_LOG(INFO, "Init pmd_pfe for %s gem-id %d (given=%d)",
1118                 name, gem_id, init_params.gem_id);
1119
1120         rc = pfe_eth_init(vdev, g_pfe, gem_id);
1121         if (rc < 0)
1122                 goto err_eth;
1123         else
1124                 g_pfe->nb_devs++;
1125
1126         return 0;
1127
1128 err_eth:
1129         pfe_hif_exit(g_pfe);
1130
1131 err_hif:
1132         pfe_hif_lib_exit(g_pfe);
1133
1134 err_hif_lib:
1135 err_prop:
1136         munmap(g_pfe->cbus_baseaddr, cbus_size);
1137 err:
1138         rte_free(g_pfe);
1139         return rc;
1140 }
1141
1142 static int
1143 pmd_pfe_remove(struct rte_vdev_device *vdev)
1144 {
1145         const char *name;
1146         struct rte_eth_dev *eth_dev = NULL;
1147
1148         name = rte_vdev_device_name(vdev);
1149         if (name == NULL)
1150                 return -EINVAL;
1151
1152         PFE_PMD_INFO("Closing PFE vdev device %s", name);
1153
1154         if (!g_pfe)
1155                 return 0;
1156
1157         eth_dev = rte_eth_dev_allocated(name);
1158         if (eth_dev == NULL)
1159                 return -ENODEV;
1160
1161         pfe_eth_exit(eth_dev, g_pfe);
1162         munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size);
1163
1164         if (g_pfe->nb_devs == 0) {
1165                 pfe_hif_exit(g_pfe);
1166                 pfe_hif_lib_exit(g_pfe);
1167                 rte_free(g_pfe);
1168                 g_pfe = NULL;
1169         }
1170         return 0;
1171 }
1172
1173 static
1174 struct rte_vdev_driver pmd_pfe_drv = {
1175         .probe = pmd_pfe_probe,
1176         .remove = pmd_pfe_remove,
1177 };
1178
1179 RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv);
1180 RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int> ");
1181 RTE_LOG_REGISTER(pfe_logtype_pmd, pmd.net.pfe, NOTICE);