/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

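/*
 * Enable the MAC Rx/Tx paths and the port, then select the interface
 * mode (RGMII auto-speed or XGMII) based on what the hardware reports.
 */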
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* Set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}
	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	return 0;
}

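/*
 * Reverse of enetc_dev_start(): mark the port as stopped, then disable
 * the port and its MAC Rx/Tx paths.
 */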
static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	dev->data->dev_started = 0;
	/* Disable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	ENETC_PMD_NOTICE("%s%s\n", name, buf);
}

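/*
 * One-time hardware setup: derive the port and global register bases,
 * apply the Rx FIFO erratum workaround, set coherent transaction
 * attributes, enable the station interface and read the primary MAC
 * address, falling back to a random locally administered address when
 * the SI has none.
 */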
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;
	uint32_t high_mac = 0;
	uint16_t low_mac = 0;

	PMD_INIT_FUNC_TRACE();
	/* Calculating and storing the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* WA for Rx lock-up HW erratum */
	enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1);

	/* Set ENETC transaction flags to coherent, don't allocate.
	 * BD writes merge with surrounding cache line data, frame data writes
	 * overwrite cache line.
	 */
	enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT);

	/* Enabling Station Interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
	high_mac = (uint32_t)*mac;
	mac++;
	*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
	low_mac = (uint16_t)*mac;

	if ((high_mac | low_mac) == 0) {
		char *first_byte;

		ENETC_PMD_NOTICE("MAC is not available for this SI, "
				 "set random MAC\n");
		mac = (uint32_t *)hw->mac.addr;
		*mac = (uint32_t)rte_rand();
		first_byte = (char *)mac;
		*first_byte &= 0xfe;	/* clear multicast bit */
		*first_byte |= 0x02;	/* set local assignment bit (IEEE802) */

		enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
		mac++;
		*mac = (uint16_t)rte_rand();
		enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
		print_ethaddr("New address: ",
			      (const struct rte_ether_addr *)hw->mac.addr);
	}

	return 0;
}

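/*
 * Report the static device capabilities: BD ring limits, queue counts,
 * maximum frame length and supported Rx offloads.
 */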
static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM |
		 DEV_RX_OFFLOAD_KEEP_CRC |
		 DEV_RX_OFFLOAD_JUMBO_FRAME);

	return 0;
}

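/*
 * Allocate the software bookkeeping array and the hardware BD ring for
 * a Tx queue; enetc_free_bdr() below releases both again.
 */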
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}

static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			 ENETC_BDR(TX, idx, ENETC_TBCISR));
}

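/*
 * Allocate and program a Tx BD ring. Unless deferred start is
 * requested in tx_conf, the ring is enabled immediately and the queue
 * state is recorded as started.
 */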
static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err = 0;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -1;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		err = -ENOMEM;
		return err;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* Enable ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}

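/*
 * Disable the Tx ring, free any mbufs still tracked by the software
 * ring, then release the BD ring and the queue structure itself.
 */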
static void
enetc_tx_queue_release(void *txq)
{
	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw;
	struct enetc_swbd *tx_swbd;
	int i;
	uint32_t val;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* Clean the ring */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

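/*
 * Rx counterparts of the Tx helpers above: allocate the software and
 * hardware rings, program the ring registers, attach the mempool and
 * pre-fill the ring with receive buffers.
 */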
static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}

static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
		   RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

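/*
 * Allocate and program an Rx BD ring; honour deferred start and record
 * whether the CRC is kept so the Rx path can account for its length.
 */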
static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *mb_pool)
{
	int err = 0;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
			ENETC_DEV_PRIVATE(data->dev_private);
	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -1;

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		err = -ENOMEM;
		return err;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	if (!rx_conf->rx_deferred_start) {
		/* Enable ring */
		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
			       ENETC_RBMR_EN);
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
				     RTE_ETHER_CRC_LEN : 0);

	return 0;
fail:
	rte_free(rx_ring);

	return err;
}

static void
enetc_rx_queue_release(void *rxq)
{
	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}

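/*
 * Fill rte_eth_stats from the port-level MAC counters.
 */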
static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/* Total received packets, bad + good, if we want to get counters of
	 * only good received packets then use ENETC_PM0_RFRM,
	 * ENETC_PM0_TFRM registers.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/* Dropped + Truncated packets, use ENETC_PM0_RDRNTP for without
	 * truncated packets
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

	return 0;
}

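/*
 * Stop the port and release every Rx and Tx queue. Only the primary
 * process tears down the hardware state.
 */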
static int
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_depopulate();

	return 0;
}

static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable promiscuous mode */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	/* Setting to disable promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable allmulticast mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	/* Setting to disable all multicast mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
			       ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

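/*
 * Validate the requested MTU against the MAC frame size limits, toggle
 * the jumbo frame offload flag accordingly and program the new maximum
 * frame size into the MAC.
 */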
static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* Check that MTU is within the allowed range */
	if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
		return -EINVAL;

	/*
	 * Refuse MTU that requires the support of scattered packets
	 * when this feature has not been enabled before.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size >
	    dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}

	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Setting the MTU */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}

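/*
 * Apply the configuration requested through rte_eth_dev_configure():
 * jumbo frame limits, CRC retention and the L3/L4 checksum validation
 * bits in the parser port configuration.
 */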
static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;

	PMD_INIT_FUNC_TRACE();

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

		enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
			      ENETC_SET_MAXFRM(max_len));
		enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
			      ENETC_MAC_MAXFRM_SIZE);
		enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
			      2 * ENETC_MAC_MAXFRM_SIZE);
		dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
			RTE_ETHER_CRC_LEN;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		int config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}

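/*
 * Per-queue start/stop callbacks: each sets or clears the enable bit
 * in the BD ring mode register and updates the queue state recorded
 * in dev->data.
 */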
static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data | ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data & (~ENETC_RBMR_EN);
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data | ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data & (~ENETC_TBMR_EN);
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure        = enetc_dev_configure,
	.dev_start            = enetc_dev_start,
	.dev_stop             = enetc_dev_stop,
	.dev_close            = enetc_dev_close,
	.link_update          = enetc_link_update,
	.stats_get            = enetc_stats_get,
	.stats_reset          = enetc_stats_reset,
	.promiscuous_enable   = enetc_promiscuous_enable,
	.promiscuous_disable  = enetc_promiscuous_disable,
	.allmulticast_enable  = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get        = enetc_dev_infos_get,
	.mtu_set              = enetc_mtu_set,
	.rx_queue_setup       = enetc_rx_queue_setup,
	.rx_queue_start       = enetc_rx_queue_start,
	.rx_queue_stop        = enetc_rx_queue_stop,
	.rx_queue_release     = enetc_rx_queue_release,
	.tx_queue_setup       = enetc_tx_queue_setup,
	.tx_queue_start       = enetc_tx_queue_start,
	.tx_queue_stop        = enetc_tx_queue_stop,
	.tx_queue_release     = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

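/*
 * A minimal sketch of how an application exercises these ops through
 * the generic ethdev API (illustrative only: port_id, descriptor
 * counts and the single-queue setup are assumptions, and error checks
 * are omitted for brevity):
 *
 *	struct rte_eth_conf conf = { 0 };
 *	uint16_t port_id = 0;	// assumed port backed by this PMD
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);  // enetc_dev_configure
 *	rte_eth_rx_queue_setup(port_id, 0, 512, 0,
 *			       NULL, pool);           // enetc_rx_queue_setup
 *	rte_eth_tx_queue_setup(port_id, 0, 512, 0,
 *			       NULL);                 // enetc_tx_queue_setup
 *	rte_eth_dev_start(port_id);                   // enetc_dev_start
 *	...
 *	rte_eth_dev_stop(port_id);                    // enetc_dev_stop
 *	rte_eth_dev_close(port_id);                   // enetc_dev_close
 */
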
/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieving and storing the HW base address of device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return -1;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      RTE_ETHER_ADDR_LEN * 1);
		error = -ENOMEM;
		return error;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			    &eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_populate();

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	return enetc_dev_close(eth_dev);
}

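/*
 * PCI bus glue: allocate the per-port private data and run
 * enetc_dev_init()/enetc_dev_uninit() on probe and remove.
 */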
static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
RTE_LOG_REGISTER(enetc_logtype_pmd, pmd.net.enetc, NOTICE);