/* drivers/net/enetc/enetc_ethdev.c */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

static int
enetc_dev_start(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

        /* Enable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

        /* Set auto-speed for RGMII */
        if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
        }
        if (enetc_global_rd(enetc_hw,
                            ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
        }

        return 0;
}

static int
enetc_dev_stop(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        dev->data->dev_started = 0;
        /* Disable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));

        return 0;
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_UNKNOWN
        };

        return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        struct rte_eth_link link;
        uint32_t status;

        PMD_INIT_FUNC_TRACE();

        memset(&link, 0, sizeof(link));

        status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

        if (status & ENETC_LINK_MODE)
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
        else
                link.link_duplex = ETH_LINK_HALF_DUPLEX;

        if (status & ENETC_LINK_STATUS)
                link.link_status = ETH_LINK_UP;
        else
                link.link_status = ETH_LINK_DOWN;

        switch (status & ENETC_LINK_SPEED_MASK) {
        case ENETC_LINK_SPEED_1G:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case ENETC_LINK_SPEED_100M:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        default:
        case ENETC_LINK_SPEED_10M:
                link.link_speed = ETH_SPEED_NUM_10M;
        }

        return rte_eth_linkstatus_set(dev, &link);
}
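
/*
 * Usage sketch (hypothetical application code, not part of this driver):
 * the link state assembled above is what an application sees through the
 * generic ethdev API, which invokes this callback without waiting:
 *
 *        struct rte_eth_link link;
 *
 *        if (rte_eth_link_get_nowait(port_id, &link) == 0 &&
 *            link.link_status == ETH_LINK_UP)
 *                printf("port %u up, %u Mbps\n", port_id, link.link_speed);
 */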

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
        char buf[RTE_ETHER_ADDR_FMT_SIZE];

        rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
        ENETC_PMD_NOTICE("%s%s\n", name, buf);
}

static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t *mac = (uint32_t *)hw->mac.addr;
        uint32_t high_mac = 0;
        uint16_t low_mac = 0;

        PMD_INIT_FUNC_TRACE();
        /* Calculating and storing the base HW addresses */
        hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
        hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

        /* WA for Rx lock-up HW erratum */
        enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1);

        /* Set ENETC transaction flags to coherent, don't allocate.
         * BD writes merge with surrounding cache line data, frame data writes
         * overwrite cache line.
         */
        enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT);

        /* Enabling Station Interface */
        enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

        *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
        high_mac = (uint32_t)*mac;
        mac++;
        *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
        low_mac = (uint16_t)*mac;

        if ((high_mac | low_mac) == 0) {
                char *first_byte;

                ENETC_PMD_NOTICE("MAC is not available for this SI, "
                                 "set random MAC\n");
                mac = (uint32_t *)hw->mac.addr;
                *mac = (uint32_t)rte_rand();
                first_byte = (char *)mac;
                *first_byte &= 0xfe;    /* clear multicast bit */
                *first_byte |= 0x02;    /* set local assignment bit (IEEE802) */

                enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
                mac++;
                *mac = (uint16_t)rte_rand();
                enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
                print_ethaddr("New address: ",
                              (const struct rte_ether_addr *)hw->mac.addr);
        }

        return 0;
}
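
/*
 * Note on the random MAC above: clearing bit 0 of the first octet marks
 * the address as unicast, and setting bit 1 marks it as locally
 * administered, per IEEE 802. A minimal standalone sketch of the same
 * idea, using the rte_ether.h helper that encapsulates both operations:
 *
 *        uint8_t addr[RTE_ETHER_ADDR_LEN];
 *
 *        rte_eth_random_addr(addr);    random bytes with the multicast bit
 *                                      cleared and the locally-administered
 *                                      bit set
 */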

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                    struct rte_eth_dev_info *dev_info)
{
        PMD_INIT_FUNC_TRACE();
        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->max_rx_queues = MAX_RX_RINGS;
        dev_info->max_tx_queues = MAX_TX_RINGS;
        dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
        dev_info->rx_offload_capa =
                (DEV_RX_OFFLOAD_IPV4_CKSUM |
                 DEV_RX_OFFLOAD_UDP_CKSUM |
                 DEV_RX_OFFLOAD_TCP_CKSUM |
                 DEV_RX_OFFLOAD_KEEP_CRC |
                 DEV_RX_OFFLOAD_JUMBO_FRAME);

        return 0;
}

static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
        int size;

        size = nb_desc * sizeof(struct enetc_swbd);
        txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (txr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_desc * sizeof(struct enetc_tx_bd);
        txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (txr->bd_base == NULL) {
                rte_free(txr->q_swbd);
                txr->q_swbd = NULL;
                return -ENOMEM;
        }

        txr->bd_count = nb_desc;
        txr->next_to_clean = 0;
        txr->next_to_use = 0;

        return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
        rte_free(rxr->q_swbd);
        rte_free(rxr->bd_base);
        rxr->q_swbd = NULL;
        rxr->bd_base = NULL;
}

static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
        int idx = tx_ring->index;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)tx_ring->bd_base);
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
                       ENETC_RTBLENR_LEN(tx_ring->bd_count));

        enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
        enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
        tx_ring->tcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(TX, idx, ENETC_TBCIR));
        tx_ring->tcisr = (void *)((size_t)hw->reg +
                         ENETC_BDR(TX, idx, ENETC_TBCISR));
}
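
/*
 * The BD ring base programmed above is a 64-bit IOVA split across two
 * 32-bit registers. A minimal sketch of what the split amounts to,
 * assuming bd_address holds the full 64-bit address:
 *
 *        uint32_t lo = (uint32_t)bd_address;              low 32 bits
 *        uint32_t hi = (uint32_t)(bd_address >> 32);      high 32 bits
 *
 * which is what the lower_32_bits()/upper_32_bits() helpers expand to.
 */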

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t queue_idx,
                     uint16_t nb_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_txconf *tx_conf)
{
        int err = 0;
        struct enetc_bdr *tx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(data->dev_private);

        PMD_INIT_FUNC_TRACE();
        if (nb_desc > MAX_BD_COUNT)
                return -1;

        tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (tx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate TX ring memory");
                return -ENOMEM;
        }

        err = enetc_alloc_txbdr(tx_ring, nb_desc);
        if (err)
                goto fail;

        tx_ring->index = queue_idx;
        tx_ring->ndev = dev;
        enetc_setup_txbdr(&priv->hw.hw, tx_ring);
        data->tx_queues[queue_idx] = tx_ring;

        if (!tx_conf->tx_deferred_start) {
                /* Enable ring */
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
                               ENETC_TBMR, ENETC_TBMR_EN);
                dev->data->tx_queue_state[tx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STARTED;
        } else {
                dev->data->tx_queue_state[tx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
fail:
        rte_free(tx_ring);

        return err;
}
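
/*
 * Usage sketch (hypothetical application code): a queue created with
 * tx_deferred_start set in struct rte_eth_txconf is left in the STOPPED
 * state above, and the application enables it explicitly later:
 *
 *        struct rte_eth_txconf txconf = { .tx_deferred_start = 1 };
 *
 *        rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
 *        ...
 *        rte_eth_dev_tx_queue_start(port_id, 0);   reaches
 *                                                  enetc_tx_queue_start()
 */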

static void
enetc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        void *txq = dev->data->tx_queues[qid];

        if (txq == NULL)
                return;

        struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
        struct enetc_hw *hw;
        struct enetc_swbd *tx_swbd;
        int i;
        uint32_t val;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
        val &= (~ENETC_TBMR_EN);
        enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

        /* Clean the ring */
        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->q_swbd[i];
        while (tx_swbd->buffer_addr != NULL) {
                rte_pktmbuf_free(tx_swbd->buffer_addr);
                tx_swbd->buffer_addr = NULL;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = &tx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(tx_ring);
        rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
                  uint16_t nb_rx_desc)
{
        int size;

        size = nb_rx_desc * sizeof(struct enetc_swbd);
        rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (rxr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_rx_desc * sizeof(union enetc_rx_bd);
        rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (rxr->bd_base == NULL) {
                rte_free(rxr->q_swbd);
                rxr->q_swbd = NULL;
                return -ENOMEM;
        }

        rxr->bd_count = nb_rx_desc;
        rxr->next_to_clean = 0;
        rxr->next_to_use = 0;
        rxr->next_to_alloc = 0;

        return 0;
}

static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
                  struct rte_mempool *mb_pool)
{
        int idx = rx_ring->index;
        uint16_t buf_size;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)rx_ring->bd_base);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
                       ENETC_RTBLENR_LEN(rx_ring->bd_count));

        rx_ring->mb_pool = mb_pool;
        rx_ring->rcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(RX, idx, ENETC_RBCIR));
        enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
                   RTE_PKTMBUF_HEADROOM);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t rx_queue_id,
                     uint16_t nb_rx_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_rxconf *rx_conf,
                     struct rte_mempool *mb_pool)
{
        int err = 0;
        struct enetc_bdr *rx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *adapter =
                        ENETC_DEV_PRIVATE(data->dev_private);
        uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

        PMD_INIT_FUNC_TRACE();
        if (nb_rx_desc > MAX_BD_COUNT)
                return -1;

        rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (rx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate RX ring memory");
                return -ENOMEM;
        }

        err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
        if (err)
                goto fail;

        rx_ring->index = rx_queue_id;
        rx_ring->ndev = dev;
        enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
        data->rx_queues[rx_queue_id] = rx_ring;

        if (!rx_conf->rx_deferred_start) {
                /* Enable ring */
                enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
                               ENETC_RBMR_EN);
                dev->data->rx_queue_state[rx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STARTED;
        } else {
                dev->data->rx_queue_state[rx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STOPPED;
        }

        rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
                                     RTE_ETHER_CRC_LEN : 0);

        return 0;
fail:
        rte_free(rx_ring);

        return err;
}
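
/*
 * Usage sketch (hypothetical application code): the mempool handed in
 * above backs the ring refill done in enetc_setup_rxbdr(), so its buffers
 * must leave room for RTE_PKTMBUF_HEADROOM:
 *
 *        struct rte_mempool *mp;
 *
 *        mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
 *                                     RTE_MBUF_DEFAULT_BUF_SIZE,
 *                                     rte_socket_id());
 *        rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mp);
 */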

static void
enetc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        void *rxq = dev->data->rx_queues[qid];

        if (rxq == NULL)
                return;

        struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
        struct enetc_swbd *q_swbd;
        struct enetc_hw *hw;
        uint32_t val;
        int i;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
        val &= (~ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

        /* Clean the ring */
        i = rx_ring->next_to_clean;
        q_swbd = &rx_ring->q_swbd[i];
        while (i != rx_ring->next_to_use) {
                rte_pktmbuf_free(q_swbd->buffer_addr);
                q_swbd->buffer_addr = NULL;
                q_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        q_swbd = &rx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(rx_ring);
        rte_free(rx_ring);
}

static int
enetc_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        /* Total received packets, bad + good. To count only the good
         * frames, read the ENETC_PM0_RFRM and ENETC_PM0_TFRM registers
         * instead.
         */
        stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
        stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
        stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
        stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
        /* Dropped + truncated packets; read ENETC_PM0_RDRNTP to exclude
         * truncated packets.
         */
        stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
        stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
        stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

        return 0;
}
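
/*
 * Usage sketch (hypothetical application code, needs <inttypes.h>): these
 * MAC counters are reported through the generic stats API:
 *
 *        struct rte_eth_stats st;
 *
 *        if (rte_eth_stats_get(port_id, &st) == 0)
 *                printf("rx %"PRIu64" pkts, %"PRIu64" dropped\n",
 *                       st.ipackets, st.imissed);
 */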

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

        return 0;
}

static int
enetc_dev_close(struct rte_eth_dev *dev)
{
        uint16_t i;
        int ret;

        PMD_INIT_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ret = enetc_dev_stop(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                enetc_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                enetc_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;

        if (rte_eal_iova_mode() == RTE_IOVA_PA)
                dpaax_iova_table_depopulate();

        return ret;
}

static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Setting to enable promiscuous mode */
        psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        /* Setting to disable promiscuous mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
        psipmr &= (~ENETC_PSIPMR_SET_UP(0));

        if (dev->data->all_multicast == 0)
                psipmr &= (~ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Setting to enable allmulticast mode for SI0 */
        psipmr |= ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        if (dev->data->promiscuous == 1)
                return 0; /* must remain in all_multicast mode */

        /* Setting to disable all multicast mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
                               ~(ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}
static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        /* Check that the MTU is within the allowed range */
        if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
                return -EINVAL;

        /*
         * Refuse an MTU that requires scattered-packet support when that
         * feature has not been enabled beforehand.
         */
        if (dev->data->min_rx_buf_size &&
            !dev->data->scattered_rx && frame_size >
            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
                ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
                return -EINVAL;
        }

        if (frame_size > ENETC_ETH_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
        enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        /* Setting the MTU */
        enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
                      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

        return 0;
}
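
/*
 * Usage sketch (hypothetical application code): the checks above bound
 * the accepted MTU by ENETC_MAC_MAXFRM_SIZE minus the Ethernet header
 * and CRC:
 *
 *        if (rte_eth_dev_set_mtu(port_id, 9000) < 0)
 *                printf("MTU 9000 rejected by the port\n");
 */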

static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;
        uint32_t checksum = L3_CKSUM | L4_CKSUM;

        PMD_INIT_FUNC_TRACE();

        if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                uint32_t max_len;

                max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

                enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
                              ENETC_SET_MAXFRM(max_len));
                enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
                              ENETC_MAC_MAXFRM_SIZE);
                enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
                              2 * ENETC_MAC_MAXFRM_SIZE);
                dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
                        RTE_ETHER_CRC_LEN;
        }

        if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
                int config;

                config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
                config |= ENETC_PM0_CRC;
                enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
        }

        if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
                checksum &= ~L3_CKSUM;

        if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
                checksum &= ~L4_CKSUM;

        enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

        return 0;
}

static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *rx_ring;
        uint32_t rx_data;

        rx_ring = dev->data->rx_queues[qidx];
        if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
                rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
                                         ENETC_RBMR);
                rx_data = rx_data | ENETC_RBMR_EN;
                enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
                               rx_data);
                dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *rx_ring;
        uint32_t rx_data;

        rx_ring = dev->data->rx_queues[qidx];
        if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
                rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
                                         ENETC_RBMR);
                rx_data = rx_data & (~ENETC_RBMR_EN);
                enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
                               rx_data);
                dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *tx_ring;
        uint32_t tx_data;

        tx_ring = dev->data->tx_queues[qidx];
        if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
                tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
                                         ENETC_TBMR);
                tx_data = tx_data | ENETC_TBMR_EN;
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
                               tx_data);
                dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *tx_ring;
        uint32_t tx_data;

        tx_ring = dev->data->tx_queues[qidx];
        if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
                tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
                                         ENETC_TBMR);
                tx_data = tx_data & (~ENETC_TBMR_EN);
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
                               tx_data);
                dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
        .dev_configure        = enetc_dev_configure,
        .dev_start            = enetc_dev_start,
        .dev_stop             = enetc_dev_stop,
        .dev_close            = enetc_dev_close,
        .link_update          = enetc_link_update,
        .stats_get            = enetc_stats_get,
        .stats_reset          = enetc_stats_reset,
        .promiscuous_enable   = enetc_promiscuous_enable,
        .promiscuous_disable  = enetc_promiscuous_disable,
        .allmulticast_enable  = enetc_allmulticast_enable,
        .allmulticast_disable = enetc_allmulticast_disable,
        .dev_infos_get        = enetc_dev_infos_get,
        .mtu_set              = enetc_mtu_set,
        .rx_queue_setup       = enetc_rx_queue_setup,
        .rx_queue_start       = enetc_rx_queue_start,
        .rx_queue_stop        = enetc_rx_queue_stop,
        .rx_queue_release     = enetc_rx_queue_release,
        .tx_queue_setup       = enetc_tx_queue_setup,
        .tx_queue_start       = enetc_tx_queue_start,
        .tx_queue_stop        = enetc_tx_queue_stop,
        .tx_queue_release     = enetc_tx_queue_release,
        .dev_supported_ptypes_get = enetc_supported_ptypes_get,
};
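
/*
 * Typical application flow against the ops table above (hypothetical
 * sketch; error handling and mempool creation omitted):
 *
 *        struct rte_eth_conf conf = { .rxmode = { .offloads =
 *                                        DEV_RX_OFFLOAD_IPV4_CKSUM } };
 *
 *        rte_eth_dev_configure(port_id, 1, 1, &conf);  enetc_dev_configure()
 *        rte_eth_rx_queue_setup(port_id, 0, 512,
 *                               rte_socket_id(), NULL, mp);
 *        rte_eth_tx_queue_setup(port_id, 0, 512,
 *                               rte_socket_id(), NULL);
 *        rte_eth_dev_start(port_id);                   enetc_dev_start()
 */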

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        eth_dev->dev_ops = &enetc_ops;
        eth_dev->rx_pkt_burst = &enetc_recv_pkts;
        eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

        eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

        /* Retrieving and storing the HW base address of device */
        hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;

        error = enetc_hardware_init(hw);
        if (error != 0) {
                ENETC_PMD_ERR("Hardware initialization failed");
                return error;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
                                               RTE_ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
                              "store MAC addresses",
                              RTE_ETHER_ADDR_LEN);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
                            &eth_dev->data->mac_addrs[0]);

        /* Set MTU */
        enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
                      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
        eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
                RTE_ETHER_CRC_LEN;

        if (rte_eal_iova_mode() == RTE_IOVA_PA)
                dpaax_iova_table_populate();

        ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
        return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        return enetc_dev_close(eth_dev);
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct enetc_eth_adapter),
                                             enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
        .id_table = pci_id_enetc_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = enetc_pci_probe,
        .remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
RTE_LOG_REGISTER_DEFAULT(enetc_logtype_pmd, NOTICE);