/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
#include <rte_ether.h>
#include <dpaa_of.h>

#include "pfe_logs.h"
#include "pfe_mod.h"

#define PFE_MAX_MACS 1 /* we can support up to 4 MACs per IF */
#define PFE_VDEV_GEM_ID_ARG	"intf"

struct pfe_vdev_init_params {
	int8_t	gem_id;
};
static struct pfe *g_pfe;
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;

/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

/* TODO: make pfe_svr a runtime option.
 * Driver should be able to get the SVR
 * information from HW.
 */
unsigned int pfe_svr = SVR_LS1012A_REV1;
static void *cbus_emac_base[3];
static void *cbus_gpi_base[3];

int pfe_logtype_pmd;

/* pfe_gemac_init
 * Apply the default GEMAC configuration: 1 Gbps full duplex, broadcast
 * and stacked VLAN allowed, Rx pause and Rx checksum offload enabled.
 */
static int
pfe_gemac_init(struct pfe_eth_priv_s *priv)
{
	struct gemac_cfg cfg;

	cfg.speed = SPEED_1000M;
	cfg.duplex = DUPLEX_FULL;

	gemac_set_config(priv->EMAC_baseaddr, &cfg);
	gemac_allow_broadcast(priv->EMAC_baseaddr);
	gemac_enable_1536_rx(priv->EMAC_baseaddr);
	gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
	gemac_enable_pause_rx(priv->EMAC_baseaddr);
	gemac_set_bus_width(priv->EMAC_baseaddr, 64);
	gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);

	return 0;
}

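/* pfe_soc_version_get
 * Read the SoC SVR from the kernel-exported file so that REV1 silicon,
 * which supports a smaller maximum frame size, can be detected at runtime.
 */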
static void
pfe_soc_version_get(void)
{
	FILE *svr_file = NULL;
	unsigned int svr_ver = 0;

	PMD_INIT_FUNC_TRACE();

	svr_file = fopen(PFE_SOC_ID_FILE, "r");
	if (!svr_file) {
		PFE_PMD_ERR("Unable to open SoC device");
		return; /* Not supported on this infra */
	}

	if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
		pfe_svr = svr_ver;
	else
		PFE_PMD_ERR("Unable to read SoC device");

	fclose(svr_file);
}

static int pfe_eth_start(struct pfe_eth_priv_s *priv)
{
	gpi_enable(priv->GPI_baseaddr);
	gemac_enable(priv->EMAC_baseaddr);

	return 0;
}

static void
pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
		  __rte_unused from_tx, __rte_unused int n_desc)
{
	struct rte_mbuf *mbuf;
	unsigned int flags;

	/* Clean HIF and client queue */
	while ((mbuf = hif_lib_tx_get_next_complete(&priv->client,
						   tx_q_num, &flags,
						   HIF_TX_DESC_NT))) {
		if (mbuf) {
			mbuf->next = NULL;
			mbuf->nb_segs = 1;
			rte_pktmbuf_free(mbuf);
		}
	}
}

static void
pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
{
	unsigned int ii;

	for (ii = 0; ii < emac_txq_cnt; ii++)
		pfe_eth_flush_txQ(priv, ii, 0, 0);
}

static int
pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
{
	struct pfe_eth_priv_s *priv = data;

	switch (event) {
	case EVENT_TXDONE_IND:
		pfe_eth_flush_tx(priv);
		hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
		break;
	case EVENT_HIGH_RX_WM:
	default:
		break;
	}

	return 0;
}

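/* pfe_recv_pkts_on_intr
 * Interrupt-mode Rx burst: reclaim Tx completions, process the HIF Rx ring
 * and hand packets to the application. If nothing was received, re-arm the
 * HIF Rx interrupt and block in epoll_wait() for up to 1 msec.
 */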
static uint16_t
pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct hif_client_rx_queue *queue = rxq;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct epoll_event epoll_ev;
	uint64_t ticks = 1;  /* 1 msec */
	int ret;
	int have_something, work_done;

#define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)

	/* TODO: can we remove this cleanup from here? */
	pfe_tx_do_cleanup(priv->pfe);
	have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
	work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
			rx_pkts, nb_pkts);

	if (!have_something || !work_done) {
		writel(RESET_STATUS, HIF_INT_SRC);
		writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
		ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
		if (ret < 0 && errno != EINTR)
			PFE_PMD_ERR("epoll_wait fails with %d\n", errno);
	}

	return work_done;
}

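/* pfe_recv_pkts
 * Poll-mode Rx burst: reclaim Tx completions, process the HIF Rx ring and
 * return the received packets.
 */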
static uint16_t
pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct hif_client_rx_queue *queue = rxq;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct rte_mempool *pool;

	/* TODO: can we remove this cleanup from here? */
	pfe_tx_do_cleanup(priv->pfe);
	pfe_hif_rx_process(priv->pfe, nb_pkts);
	pool = priv->pfe->hif.shm->pool;

	return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
}

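/* pfe_xmit_pkts
 * Tx burst: each mbuf segment is handed to the HIF library; chained mbufs
 * are sent as FIRST/middle/LAST HIF buffers and the HIF Tx DMA is kicked
 * once per packet. Completed descriptors are reclaimed at the end.
 */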
static uint16_t
pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct hif_client_tx_queue *queue = tx_queue;
	struct pfe_eth_priv_s *priv = queue->priv;
	struct rte_eth_stats *stats = &priv->stats;
	int i;

	for (i = 0; i < nb_pkts; i++) {
		if (tx_pkts[i]->nb_segs > 1) {
			struct rte_mbuf *mbuf;
			int j;

			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
				tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
				tx_pkts[i]);

			mbuf = tx_pkts[i]->next;
			for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
				hif_lib_xmit_pkt(&priv->client, queue->queue_id,
					(void *)(size_t)rte_pktmbuf_iova(mbuf),
					mbuf->buf_addr + mbuf->data_off,
					mbuf->data_len,
					0x0, 0x0, mbuf);
				mbuf = mbuf->next;
			}

			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
					(void *)(size_t)rte_pktmbuf_iova(mbuf),
					mbuf->buf_addr + mbuf->data_off,
					mbuf->data_len,
					0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
					mbuf);
		} else {
			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
				tx_pkts[i]->pkt_len, 0 /*ctrl*/,
				HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
				HIF_DATA_VALID,
				tx_pkts[i]);
		}
		stats->obytes += tx_pkts[i]->pkt_len;
		hif_tx_dma_start();
	}
	stats->opackets += nb_pkts;
	pfe_tx_do_cleanup(priv->pfe);

	return nb_pkts;
}

static uint16_t
pfe_dummy_xmit_pkts(__rte_unused void *tx_queue,
		__rte_unused struct rte_mbuf **tx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	return 0;
}

static uint16_t
pfe_dummy_recv_pkts(__rte_unused void *rxq,
		__rte_unused struct rte_mbuf **rx_pkts,
		__rte_unused uint16_t nb_pkts)
{
	return 0;
}

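/* pfe_eth_open
 * dev_start callback: register this port as a HIF client (or drain any
 * stale Rx packets if it is already registered), enable GPI/GEMAC and
 * select the Rx/Tx burst functions.
 */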
static int
pfe_eth_open(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct hif_client_s *client;
	struct hif_shm *hif_shm;
	int rc;

	/* Register client driver with HIF */
	client = &priv->client;

	if (client->pfe) {
		hif_shm = client->pfe->hif.shm;
		/* TODO: remove this if-block once proper cleanup is added
		 * in eth_close
		 */
		if (!test_bit(PFE_CL_GEM0 + priv->id,
			      &hif_shm->g_client_status[0])) {
			/* Register client driver with HIF */
			memset(client, 0, sizeof(*client));
			client->id = PFE_CL_GEM0 + priv->id;
			client->tx_qn = emac_txq_cnt;
			client->rx_qn = EMAC_RXQ_CNT;
			client->priv = priv;
			client->pfe = priv->pfe;
			client->port_id = dev->data->port_id;
			client->event_handler = pfe_eth_event_handler;

			client->tx_qsize = EMAC_TXQ_DEPTH;
			client->rx_qsize = EMAC_RXQ_DEPTH;

			rc = hif_lib_client_register(client);
			if (rc) {
				PFE_PMD_ERR("hif_lib_client_register(%d)"
					    " failed", client->id);
				goto err0;
			}
		} else {
			/* Free any packets still pending on the client
			 * Rx queue
			 */
			int ret = 0;
			struct rte_mbuf *rx_pkts[32];
			/* TODO multiqueue support */
			ret = hif_lib_receive_pkt(&client->rx_q[0],
						  hif_shm->pool, rx_pkts, 32);
			while (ret) {
				int i;
				for (i = 0; i < ret; i++)
					rte_pktmbuf_free(rx_pkts[i]);
				ret = hif_lib_receive_pkt(&client->rx_q[0],
							  hif_shm->pool,
							  rx_pkts, 32);
			}
		}
	} else {
		/* Register client driver with HIF */
		memset(client, 0, sizeof(*client));
		client->id = PFE_CL_GEM0 + priv->id;
		client->tx_qn = emac_txq_cnt;
		client->rx_qn = EMAC_RXQ_CNT;
		client->priv = priv;
		client->pfe = priv->pfe;
		client->port_id = dev->data->port_id;
		client->event_handler = pfe_eth_event_handler;

		client->tx_qsize = EMAC_TXQ_DEPTH;
		client->rx_qsize = EMAC_RXQ_DEPTH;

		rc = hif_lib_client_register(client);
		if (rc) {
			PFE_PMD_ERR("hif_lib_client_register(%d) failed",
				    client->id);
			goto err0;
		}
	}
	rc = pfe_eth_start(priv);
	dev->rx_pkt_burst = &pfe_recv_pkts;
	dev->tx_pkt_burst = &pfe_xmit_pkts;
	/* Use the interrupt-driven Rx path when PFE_INTR_SUPPORT is set */
	if (getenv("PFE_INTR_SUPPORT")) {
		dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
		PFE_PMD_INFO("PFE INTERRUPT Mode enabled");
	}

err0:
	return rc;
}

static int
pfe_eth_open_cdev(struct pfe_eth_priv_s *priv)
{
	int pfe_cdev_fd;

	if (priv == NULL)
		return -1;

	pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDONLY);
	if (pfe_cdev_fd < 0) {
		PFE_PMD_WARN("Unable to open PFE device file (%s).\n",
			     PFE_CDEV_PATH);
		PFE_PMD_WARN("Link status update will not be available.\n");
		priv->link_fd = PFE_CDEV_INVALID_FD;
		return -1;
	}

	priv->link_fd = pfe_cdev_fd;

	return 0;
}

static void
pfe_eth_close_cdev(struct pfe_eth_priv_s *priv)
{
	if (priv == NULL)
		return;

	if (priv->link_fd != PFE_CDEV_INVALID_FD) {
		close(priv->link_fd);
		priv->link_fd = PFE_CDEV_INVALID_FD;
	}
}

static void
pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	gemac_disable(priv->EMAC_baseaddr);
	gpi_disable(priv->GPI_baseaddr);

	dev->rx_pkt_burst = &pfe_dummy_recv_pkts;
	dev->tx_pkt_burst = &pfe_dummy_xmit_pkts;
}

static void
pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe)
{
	PMD_INIT_FUNC_TRACE();

	pfe_eth_stop(dev);
	/* Close the device file for link status */
	pfe_eth_close_cdev(dev->data->dev_private);

	rte_eth_dev_release_port(dev);
	pfe->nb_devs--;
}

static void
pfe_eth_close(struct rte_eth_dev *dev)
{
	if (!dev)
		return;

	if (!g_pfe)
		return;

	pfe_eth_exit(dev, g_pfe);

	if (g_pfe->nb_devs == 0) {
		pfe_hif_exit(g_pfe);
		pfe_hif_lib_exit(g_pfe);
		rte_free(g_pfe);
		g_pfe = NULL;
	}
}

static int
pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}

static int
pfe_eth_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pfe_eth_priv_s *internals = dev->data->dev_private;

	dev_info->if_index = internals->id;
	dev_info->max_mac_addrs = PFE_MAX_MACS;
	dev_info->max_rx_queues = dev->data->nb_rx_queues;
	dev_info->max_tx_queues = dev->data->nb_tx_queues;
	dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->rx_offload_capa = dev_rx_offloads_sup;
	dev_info->tx_offload_capa = dev_tx_offloads_sup;
	if (pfe_svr == SVR_LS1012A_REV1) {
		dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
		dev_info->max_mtu = MAX_MTU_ON_REV1;
	} else {
		dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
		dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD;
	}

	return 0;
}

/* Only the mb_pool supplied on the first call of this API is used for the
 * whole system; nb_rx_desc and rx_conf are unused parameters.
 */
static int
pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		__rte_unused uint16_t nb_rx_desc,
		__rte_unused unsigned int socket_id,
		__rte_unused const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mb_pool)
{
	int rc = 0;
	struct pfe *pfe;
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	pfe = priv->pfe;

	if (queue_idx >= EMAC_RXQ_CNT) {
		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
				queue_idx, EMAC_RXQ_CNT);
		return -1;
	}

	if (!pfe->hif.setuped) {
		rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
		if (rc) {
			PFE_PMD_ERR("Could not allocate buffer descriptors");
			return -1;
		}

		pfe->hif.shm->pool = mb_pool;
		if (pfe_hif_init_buffers(&pfe->hif)) {
			PFE_PMD_ERR("Could not initialize buffer descriptors");
			return -1;
		}
		hif_init();
		hif_rx_enable();
		hif_tx_enable();
		pfe->hif.setuped = 1;
	}
	dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
	priv->client.rx_q[queue_idx].queue_id = queue_idx;

	return 0;
}

static void
pfe_rx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static void
pfe_tx_queue_release(void *q __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
pfe_tx_queue_setup(struct rte_eth_dev *dev,
		   uint16_t queue_idx,
		   __rte_unused uint16_t nb_desc,
		   __rte_unused unsigned int socket_id,
		   __rte_unused const struct rte_eth_txconf *tx_conf)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	if (queue_idx >= emac_txq_cnt) {
		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
				queue_idx, emac_txq_cnt);
		return -1;
	}
	dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
	priv->client.tx_q[queue_idx].queue_id = queue_idx;
	return 0;
}

static const uint32_t *
pfe_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* TODO: add more types */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP
	};

	if (dev->rx_pkt_burst == pfe_recv_pkts ||
			dev->rx_pkt_burst == pfe_recv_pkts_on_intr)
		return ptypes;
	return NULL;
}

static inline int
pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static inline int
pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev,
				 struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

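/* pfe_eth_link_update
 * Fetch the PHY link state through an ioctl on the PFE character device
 * (when available) and publish it atomically to dev->data->dev_link.
 */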
static int
pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	int ret, ioctl_cmd = 0;
	struct pfe_eth_priv_s *priv;
	struct rte_eth_link link, old;
	unsigned int lstatus = 1;

	if (dev == NULL) {
		PFE_PMD_ERR("Invalid device in link_update.\n");
		return 0;
	}
	priv = dev->data->dev_private;

	memset(&old, 0, sizeof(old));
	memset(&link, 0, sizeof(struct rte_eth_link));

	pfe_eth_atomic_read_link_status(dev, &old);

	/* Read the link status from the PFE CDEV if the device file was
	 * opened successfully.
	 */
	if (priv->link_fd != PFE_CDEV_INVALID_FD) {
		if (priv->id == 0)
			ioctl_cmd = PFE_CDEV_ETH0_STATE_GET;
		if (priv->id == 1)
			ioctl_cmd = PFE_CDEV_ETH1_STATE_GET;

		ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus);
		if (ret != 0) {
			PFE_PMD_ERR("Unable to fetch link status (ioctl)\n");
			/* use dummy link value */
			link.link_status = 1;
		}
		PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n",
			      lstatus, priv->id);
	}

	if (old.link_status == lstatus) {
		/* no change in status */
		PFE_PMD_DEBUG("No change in link status; Not updating.\n");
		return -1;
	}

	link.link_status = lstatus;
	link.link_speed = ETH_SPEED_NUM_1G;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = ETH_LINK_AUTONEG;

	pfe_eth_atomic_write_link_status(dev, &link);

	PFE_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
		     link.link_status ? "up" : "down");

	return 0;
}

static int
pfe_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	priv->promisc = 1;
	dev->data->promiscuous = 1;
	gemac_enable_copy_all(priv->EMAC_baseaddr);

	return 0;
}

static int
pfe_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	priv->promisc = 0;
	dev->data->promiscuous = 0;
	gemac_disable_copy_all(priv->EMAC_baseaddr);

	return 0;
}

static int
pfe_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct pfe_mac_addr    hash_addr; /* hash register structure */

	/* Set the hash to rx all multicast frames */
	hash_addr.bottom = 0xFFFFFFFF;
	hash_addr.top = 0xFFFFFFFF;
	gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
	dev->data->all_multicast = 1;

	return 0;
}

static int
pfe_link_down(struct rte_eth_dev *dev)
{
	pfe_eth_stop(dev);
	return 0;
}

static int
pfe_link_up(struct rte_eth_dev *dev)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;

	pfe_eth_start(priv);
	return 0;
}

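/* pfe_mtu_set
 * Program the EMAC maximum Rx frame length; the frame size accounts for
 * the Ethernet header and CRC (VLAN is not yet considered, see TODO).
 */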
static int
pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	int ret;
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* TODO: support VLAN */
	ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size);
	if (!ret)
		dev->data->mtu = mtu;

	return ret;
}

/* pfe_eth_enet_addr_byte_mac
 * Pack a 6-byte MAC address into the bottom/top word layout expected by
 * the GEMAC address registers.
 */
static int
pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
			   struct pfe_mac_addr *enet_addr)
{
	if (!enet_byte_addr || !enet_addr) {
		return -1;

	} else {
		enet_addr->bottom = enet_byte_addr[0] |
			(enet_byte_addr[1] << 8) |
			(enet_byte_addr[2] << 16) |
			(enet_byte_addr[3] << 24);
		enet_addr->top = enet_byte_addr[4] |
			(enet_byte_addr[5] << 8);
		return 0;
	}
}

static int
pfe_dev_set_mac_addr(struct rte_eth_dev *dev,
		       struct rte_ether_addr *addr)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct pfe_mac_addr spec_addr;
	int ret;

	ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr);
	if (ret)
		return ret;

	gemac_set_laddrN(priv->EMAC_baseaddr,
			 (struct pfe_mac_addr *)&spec_addr, 1);
	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
	return 0;
}

static int
pfe_stats_get(struct rte_eth_dev *dev,
	      struct rte_eth_stats *stats)
{
	struct pfe_eth_priv_s *priv = dev->data->dev_private;
	struct rte_eth_stats *eth_stats = &priv->stats;

	if (stats == NULL)
		return -1;

	memset(stats, 0, sizeof(struct rte_eth_stats));

	stats->ipackets = eth_stats->ipackets;
	stats->ibytes = eth_stats->ibytes;
	stats->opackets = eth_stats->opackets;
	stats->obytes = eth_stats->obytes;

	return 0;
}

static const struct eth_dev_ops ops = {
	.dev_start = pfe_eth_open,
	.dev_stop = pfe_eth_stop,
	.dev_close = pfe_eth_close,
	.dev_configure = pfe_eth_configure,
	.dev_infos_get = pfe_eth_info,
	.rx_queue_setup = pfe_rx_queue_setup,
	.rx_queue_release  = pfe_rx_queue_release,
	.tx_queue_setup = pfe_tx_queue_setup,
	.tx_queue_release  = pfe_tx_queue_release,
	.dev_supported_ptypes_get = pfe_supported_ptypes_get,
	.link_update  = pfe_eth_link_update,
	.promiscuous_enable   = pfe_promiscuous_enable,
	.promiscuous_disable  = pfe_promiscuous_disable,
	.allmulticast_enable  = pfe_allmulticast_enable,
	.dev_set_link_down    = pfe_link_down,
	.dev_set_link_up      = pfe_link_up,
	.mtu_set              = pfe_mtu_set,
	.mac_addr_set         = pfe_dev_set_mac_addr,
	.stats_get            = pfe_stats_get,
};

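/* pfe_eth_init
 * Allocate and set up one ethdev for a GEMAC: wire the EMAC/GPI register
 * bases and TMU queues, program the station MAC address, register the
 * eth_dev_ops and open the PFE character device for link status.
 */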
static int
pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
{
	struct rte_eth_dev *eth_dev = NULL;
	struct pfe_eth_priv_s *priv = NULL;
	struct ls1012a_eth_platform_data *einfo;
	struct ls1012a_pfe_platform_data *pfe_info;
	struct rte_ether_addr addr;
	int err;

	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
	if (eth_dev == NULL)
		return -ENOMEM;

	/* Extract platform data */
	pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
	if (!pfe_info) {
		PFE_PMD_ERR("pfe missing additional platform data");
		err = -ENODEV;
		goto err0;
	}

	einfo = (struct ls1012a_eth_platform_data *)pfe_info->ls1012a_eth_pdata;

	/* einfo should never be NULL, but there is no harm in checking */
	if (!einfo) {
		PFE_PMD_ERR("pfe missing additional gemacs platform data");
		err = -ENODEV;
		goto err0;
	}

	priv = eth_dev->data->dev_private;
	priv->ndev = eth_dev;
	priv->id = einfo[id].gem_id;
	priv->pfe = pfe;

	pfe->eth.eth_priv[id] = priv;

	/* Set the info in the priv to the current info */
	priv->einfo = &einfo[id];
	priv->EMAC_baseaddr = cbus_emac_base[id];
	priv->PHY_baseaddr = cbus_emac_base[id];
	priv->GPI_baseaddr = cbus_gpi_base[id];

#define HIF_GEMAC_TMUQ_BASE	6
	priv->low_tmu_q = HIF_GEMAC_TMUQ_BASE + (id * 2);
	priv->high_tmu_q = priv->low_tmu_q + 1;

	rte_spinlock_init(&priv->lock);

	/* Copy the station address into the dev structure */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
			ETHER_ADDR_LEN * PFE_MAX_MACS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PFE_PMD_ERR("Failed to allocate mem %d to store MAC addresses",
			ETHER_ADDR_LEN * PFE_MAX_MACS);
		err = -ENOMEM;
		goto err0;
	}

	memcpy(addr.addr_bytes, priv->einfo->mac_addr,
		       ETH_ALEN);

	pfe_dev_set_mac_addr(eth_dev, &addr);
	rte_ether_addr_copy(&addr, &eth_dev->data->mac_addrs[0]);

	eth_dev->data->mtu = 1500;
	eth_dev->dev_ops = &ops;
	pfe_eth_stop(eth_dev);
	pfe_gemac_init(priv);

	eth_dev->data->nb_rx_queues = 1;
	eth_dev->data->nb_tx_queues = 1;

	/* For link status, open the PFE CDEV; errors from this function are
	 * silently ignored; in case of error, the link status will not be
	 * available.
	 */
	pfe_eth_open_cdev(priv);
	rte_eth_dev_probing_finish(eth_dev);

	return 0;
err0:
	rte_eth_dev_release_port(eth_dev);
	return err;
}

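/* pfe_get_gemac_if_properties
 * Walk the "fsl,pfe" device-tree children to locate the gemac node for the
 * given port and extract its MAC address and MDIO mux value.
 */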
static int
pfe_get_gemac_if_properties(struct pfe *pfe,
		__rte_unused const struct device_node *parent,
		unsigned int port, unsigned int if_cnt,
		struct ls1012a_pfe_platform_data *pdata)
{
	const struct device_node *gem = NULL;
	size_t size;
	unsigned int ii = 0, phy_id = 0;
	const u32 *addr;
	const void *mac_addr;

	for (ii = 0; ii < if_cnt; ii++) {
		gem = of_get_next_child(parent, gem);
		if (!gem)
			goto err;
		addr = of_get_property(gem, "reg", &size);
		if (addr && (rte_be_to_cpu_32((unsigned int)*addr) == port))
			break;
	}

	if (ii >= if_cnt) {
		PFE_PMD_ERR("Failed to find interface = %d", if_cnt);
		goto err;
	}

	pdata->ls1012a_eth_pdata[port].gem_id = port;

	mac_addr = of_get_mac_address(gem);

	if (mac_addr) {
		memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
		       ETH_ALEN);
	}

	addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
	if (!addr) {
		PFE_PMD_ERR("Invalid mdio-mux-val");
	} else {
		phy_id = rte_be_to_cpu_32((unsigned int)*addr);
		pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
	}
	if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
		pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
			 pdata->ls1012a_eth_pdata[port].mdio_muxval;

	return 0;

err:
	return -1;
}

/* Parse integer from integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		const char *value, void *extra_args)
{
	int i;
	char *end;
	errno = 0;

	i = strtol(value, &end, 10);
	if (*end != 0 || errno != 0 || i < 0 || i > 1) {
		PFE_PMD_ERR("Supported Port IDs are 0 and 1");
		return -EINVAL;
	}

	/* extra_args points to the int8_t gem_id member of
	 * struct pfe_vdev_init_params, so store only a single byte rather
	 * than writing a full 32-bit word through the pointer.
	 */
	*((int8_t *)extra_args) = (int8_t)i;

	return 0;
}

static int
pfe_parse_vdev_init_params(struct pfe_vdev_init_params *params,
			   struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist = NULL;
	int ret = 0;

	static const char * const pfe_vdev_valid_params[] = {
		PFE_VDEV_GEM_ID_ARG,
		NULL
	};

	const char *input_args = rte_vdev_device_args(dev);

	if (!input_args)
		return -1;

	kvlist = rte_kvargs_parse(input_args, pfe_vdev_valid_params);
	if (kvlist == NULL)
		return -1;

	ret = rte_kvargs_process(kvlist,
				PFE_VDEV_GEM_ID_ARG,
				&parse_integer_arg,
				&params->gem_id);
	rte_kvargs_free(kvlist);
	return ret;
}

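/* pmd_pfe_probe
 * vdev probe: parse the "intf" argument, map the PFE CBUS region through
 * /dev/mem, read the interface properties from the device tree, bring up
 * the HIF (only once, shared via g_pfe) and create the ethdev.
 */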
static int
pmd_pfe_probe(struct rte_vdev_device *vdev)
{
	const u32 *prop;
	const struct device_node *np;
	const char *name;
	const uint32_t *addr;
	uint64_t cbus_addr, ddr_size, cbus_size;
	int rc = -1, fd = -1, gem_id;
	unsigned int ii, interface_count = 0;
	size_t size = 0;
	struct pfe_vdev_init_params init_params = {
		.gem_id = -1
	};

	name = rte_vdev_device_name(vdev);
	rc = pfe_parse_vdev_init_params(&init_params, vdev);
	if (rc < 0)
		return -EINVAL;

	PFE_PMD_LOG(INFO, "Initializing pmd_pfe for %s, given gem-id %d",
		name, init_params.gem_id);

	if (g_pfe) {
		if (g_pfe->nb_devs >= g_pfe->max_intf) {
			PFE_PMD_ERR("PFE %d devs already created, max is %d",
				g_pfe->nb_devs, g_pfe->max_intf);
			return -EINVAL;
		}
		goto eth_init;
	}

	g_pfe = rte_zmalloc(NULL, sizeof(*g_pfe), RTE_CACHE_LINE_SIZE);
	if (g_pfe == NULL)
		return -EINVAL;

	/* Load the device-tree driver */
	rc = of_init();
	if (rc) {
		PFE_PMD_ERR("of_init failed with ret: %d", rc);
		goto err;
	}

	np = of_find_compatible_node(NULL, NULL, "fsl,pfe");
	if (!np) {
		PFE_PMD_ERR("Invalid device node");
		rc = -EINVAL;
		goto err;
	}

	addr = of_get_address(np, 0, &cbus_size, NULL);
	if (!addr) {
		PFE_PMD_ERR("of_get_address cannot return cbus address\n");
		goto err;
	}
	cbus_addr = of_translate_address(np, addr);
	if (!cbus_addr) {
		PFE_PMD_ERR("of_translate_address failed\n");
		goto err;
	}

	addr = of_get_address(np, 1, &ddr_size, NULL);
	if (!addr) {
		PFE_PMD_ERR("of_get_address cannot return ddr address\n");
		goto err;
	}

	g_pfe->ddr_phys_baseaddr = of_translate_address(np, addr);
	if (!g_pfe->ddr_phys_baseaddr) {
		PFE_PMD_ERR("of_translate_address failed\n");
		goto err;
	}

	g_pfe->ddr_baseaddr = pfe_mem_ptov(g_pfe->ddr_phys_baseaddr);
	g_pfe->ddr_size = ddr_size;
	g_pfe->cbus_size = cbus_size;

	fd = open("/dev/mem", O_RDWR);
	g_pfe->cbus_baseaddr = mmap(NULL, cbus_size, PROT_READ | PROT_WRITE,
					MAP_SHARED, fd, cbus_addr);
	close(fd);
	if (g_pfe->cbus_baseaddr == MAP_FAILED) {
		PFE_PMD_ERR("Can not map cbus base");
		rc = -EINVAL;
		goto err;
	}

	/* Read interface count */
	prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
	if (!prop) {
		PFE_PMD_ERR("Failed to read number of interfaces");
		rc = -ENXIO;
		goto err_prop;
	}

	interface_count = rte_be_to_cpu_32((unsigned int)*prop);
	if (interface_count <= 0) {
		PFE_PMD_ERR("No ethernet interface count: %d",
				interface_count);
		rc = -ENXIO;
		goto err_prop;
	}
	PFE_PMD_INFO("num interfaces = %d ", interface_count);

	g_pfe->max_intf  = interface_count;
	g_pfe->platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;

	for (ii = 0; ii < interface_count; ii++) {
		pfe_get_gemac_if_properties(g_pfe, np, ii, interface_count,
					   &g_pfe->platform_data);
	}

	pfe_lib_init(g_pfe->cbus_baseaddr, g_pfe->ddr_baseaddr,
		     g_pfe->ddr_phys_baseaddr, g_pfe->ddr_size);

	PFE_PMD_INFO("CLASS version: %x", readl(CLASS_VERSION));
	PFE_PMD_INFO("TMU version: %x", readl(TMU_VERSION));

	PFE_PMD_INFO("BMU1 version: %x", readl(BMU1_BASE_ADDR + BMU_VERSION));
	PFE_PMD_INFO("BMU2 version: %x", readl(BMU2_BASE_ADDR + BMU_VERSION));

	PFE_PMD_INFO("EGPI1 version: %x", readl(EGPI1_BASE_ADDR + GPI_VERSION));
	PFE_PMD_INFO("EGPI2 version: %x", readl(EGPI2_BASE_ADDR + GPI_VERSION));
	PFE_PMD_INFO("HGPI version: %x", readl(HGPI_BASE_ADDR + GPI_VERSION));

	PFE_PMD_INFO("HIF version: %x", readl(HIF_VERSION));
	PFE_PMD_INFO("HIF NOCPY version: %x", readl(HIF_NOCPY_VERSION));

	cbus_emac_base[0] = EMAC1_BASE_ADDR;
	cbus_emac_base[1] = EMAC2_BASE_ADDR;

	cbus_gpi_base[0] = EGPI1_BASE_ADDR;
	cbus_gpi_base[1] = EGPI2_BASE_ADDR;

	rc = pfe_hif_lib_init(g_pfe);
	if (rc < 0)
		goto err_hif_lib;

	rc = pfe_hif_init(g_pfe);
	if (rc < 0)
		goto err_hif;
	pfe_soc_version_get();
eth_init:
	if (init_params.gem_id < 0)
		gem_id = g_pfe->nb_devs;
	else
		gem_id = init_params.gem_id;

	PFE_PMD_LOG(INFO, "Init pmd_pfe for %s gem-id %d (given=%d)",
		name, gem_id, init_params.gem_id);

	rc = pfe_eth_init(vdev, g_pfe, gem_id);
	if (rc < 0)
		goto err_eth;
	else
		g_pfe->nb_devs++;

	return 0;

err_eth:
	pfe_hif_exit(g_pfe);

err_hif:
	pfe_hif_lib_exit(g_pfe);

err_hif_lib:
err_prop:
	munmap(g_pfe->cbus_baseaddr, cbus_size);
err:
	rte_free(g_pfe);
	return rc;
}

static int
pmd_pfe_remove(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eth_dev *eth_dev = NULL;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	PFE_PMD_INFO("Removing pmd_pfe device %s", name);

	if (!g_pfe)
		return 0;

	eth_dev = rte_eth_dev_allocated(name);
	if (eth_dev == NULL)
		return -ENODEV;

	pfe_eth_exit(eth_dev, g_pfe);
	munmap(g_pfe->cbus_baseaddr, g_pfe->cbus_size);

	if (g_pfe->nb_devs == 0) {
		pfe_hif_exit(g_pfe);
		pfe_hif_lib_exit(g_pfe);
		rte_free(g_pfe);
		g_pfe = NULL;
	}
	return 0;
}

static struct rte_vdev_driver pmd_pfe_drv = {
	.probe = pmd_pfe_probe,
	.remove = pmd_pfe_remove,
};

RTE_PMD_REGISTER_VDEV(PFE_NAME_PMD, pmd_pfe_drv);
RTE_PMD_REGISTER_PARAM_STRING(PFE_NAME_PMD, PFE_VDEV_GEM_ID_ARG "=<int> ");

RTE_INIT(pfe_pmd_init_log)
{
	pfe_logtype_pmd = rte_log_register("pmd.net.pfe");
	if (pfe_logtype_pmd >= 0)
		rte_log_set_level(pfe_logtype_pmd, RTE_LOG_NOTICE);
}