net/enetc: enable CRC offload
drivers/net/enetc/enetc_ethdev.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;

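/*
 * Start the port: enable the MAC TX/RX datapaths and the port itself,
 * then program the MAC interface mode (RGMII auto-speed or XGMII)
 * according to the interface mode reported by the hardware.
 */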
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

        /* Enable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

        /* Set auto-speed for RGMII */
        if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
        }
        if (enetc_global_rd(enetc_hw,
                            ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
        }

        return 0;
}

static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        /* Disable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_UNKNOWN
        };

        return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        struct rte_eth_link link;
        uint32_t status;

        PMD_INIT_FUNC_TRACE();

        memset(&link, 0, sizeof(link));

        status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

        if (status & ENETC_LINK_MODE)
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
        else
                link.link_duplex = ETH_LINK_HALF_DUPLEX;

        if (status & ENETC_LINK_STATUS)
                link.link_status = ETH_LINK_UP;
        else
                link.link_status = ETH_LINK_DOWN;

        switch (status & ENETC_LINK_SPEED_MASK) {
        case ENETC_LINK_SPEED_1G:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case ENETC_LINK_SPEED_100M:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        default:
        case ENETC_LINK_SPEED_10M:
                link.link_speed = ETH_SPEED_NUM_10M;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t *mac = (uint32_t *)hw->mac.addr;

        PMD_INIT_FUNC_TRACE();
        /* Calculating and storing the base HW addresses */
        hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
        hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

        /* Enabling Station Interface */
        enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

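        /* The primary MAC address is spread across PSIPMAR0 (first four
         * bytes) and the low half of PSIPMAR1 (last two bytes).
         */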
        *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
        mac++;
        /* Only two bytes remain; a 16-bit store avoids writing past the
         * six-byte MAC buffer.
         */
        *(uint16_t *)mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));

        return 0;
}

static void
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                    struct rte_eth_dev_info *dev_info)
{
        PMD_INIT_FUNC_TRACE();
        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->max_rx_queues = MAX_RX_RINGS;
        dev_info->max_tx_queues = MAX_TX_RINGS;
        dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
        dev_info->rx_offload_capa =
                (DEV_RX_OFFLOAD_KEEP_CRC |
                 DEV_RX_OFFLOAD_JUMBO_FRAME);
}

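/*
 * Allocate the software and hardware descriptor arrays for a TX ring.
 * The software ring (q_swbd) shadows the hardware BD ring so the clean
 * path can find the mbuf backing each completed descriptor.
 */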
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
        int size;

        size = nb_desc * sizeof(struct enetc_swbd);
        txr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_desc * sizeof(struct enetc_tx_bd);
        txr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->bd_base == NULL) {
                rte_free(txr->q_swbd);
                txr->q_swbd = NULL;
                return -ENOMEM;
        }

        txr->bd_count = nb_desc;
        txr->next_to_clean = 0;
        txr->next_to_use = 0;

        return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
        rte_free(rxr->q_swbd);
        rte_free(rxr->bd_base);
        rxr->q_swbd = NULL;
        rxr->bd_base = NULL;
}

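/*
 * Program a TX BD ring into the hardware: write the IOVA of the ring
 * (split across TBBAR0/TBBAR1) and its length, zero the index
 * registers, then cache pointers to the consumer-index registers for
 * use in the TX datapath.
 */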
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
        int idx = tx_ring->index;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)tx_ring->bd_base);
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
                       ENETC_RTBLENR_LEN(tx_ring->bd_count));

        enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
        enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
        tx_ring->tcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(TX, idx, ENETC_TBCIR));
        tx_ring->tcisr = (void *)((size_t)hw->reg +
                         ENETC_BDR(TX, idx, ENETC_TBCISR));
}

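/*
 * ethdev tx_queue_setup hook: allocate a BD ring, program it into the
 * hardware, and enable it immediately unless deferred start was
 * requested.
 */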
static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t queue_idx,
                     uint16_t nb_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_txconf *tx_conf)
{
        int err = 0;
        struct enetc_bdr *tx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(data->dev_private);

        PMD_INIT_FUNC_TRACE();
        if (nb_desc > MAX_BD_COUNT)
                return -EINVAL;

        tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (tx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate TX ring memory");
                return -ENOMEM;
        }

        err = enetc_alloc_txbdr(tx_ring, nb_desc);
        if (err)
                goto fail;

        tx_ring->index = queue_idx;
        tx_ring->ndev = dev;
        enetc_setup_txbdr(&priv->hw.hw, tx_ring);
        data->tx_queues[queue_idx] = tx_ring;

        if (!tx_conf->tx_deferred_start) {
                /* Enable ring */
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
                               ENETC_TBMR, ENETC_TBMR_EN);
                dev->data->tx_queue_state[tx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STARTED;
        } else {
                dev->data->tx_queue_state[tx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
fail:
        rte_free(tx_ring);

        return err;
}

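/*
 * Release a TX queue: disable the ring in hardware, free any mbufs the
 * software ring still references, then free the descriptor memory.
 */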
static void
enetc_tx_queue_release(void *txq)
{
        if (txq == NULL)
                return;

        struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
        struct enetc_hw *hw;
        struct enetc_swbd *tx_swbd;
        int i;
        uint32_t val;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
        val &= (~ENETC_TBMR_EN);
        enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

        /* Clean the ring */
        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->q_swbd[i];
        while (tx_swbd->buffer_addr != NULL) {
                rte_pktmbuf_free(tx_swbd->buffer_addr);
                tx_swbd->buffer_addr = NULL;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = &tx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(tx_ring);
        rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
                  uint16_t nb_rx_desc)
{
        int size;

        size = nb_rx_desc * sizeof(struct enetc_swbd);
        rxr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_rx_desc * sizeof(union enetc_rx_bd);
        rxr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->bd_base == NULL) {
                rte_free(rxr->q_swbd);
                rxr->q_swbd = NULL;
                return -ENOMEM;
        }

        rxr->bd_count = nb_rx_desc;
        rxr->next_to_clean = 0;
        rxr->next_to_use = 0;
        rxr->next_to_alloc = 0;

        return 0;
}

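/*
 * Program an RX BD ring into the hardware and pre-fill it with mbufs
 * from the queue's mempool; the buffer size reported to hardware is
 * the mbuf data room minus the headroom.
 */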
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
                  struct rte_mempool *mb_pool)
{
        int idx = rx_ring->index;
        uint16_t buf_size;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)rx_ring->bd_base);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
                       ENETC_RTBLENR_LEN(rx_ring->bd_count));

        rx_ring->mb_pool = mb_pool;
        rx_ring->rcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(RX, idx, ENETC_RBCIR));
        enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
                   RTE_PKTMBUF_HEADROOM);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

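/*
 * ethdev rx_queue_setup hook: allocate and program an RX BD ring,
 * honour deferred start, and record whether the MAC keeps the FCS so
 * the RX datapath can account for it.
 */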
static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t rx_queue_id,
                     uint16_t nb_rx_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_rxconf *rx_conf,
                     struct rte_mempool *mb_pool)
{
        int err = 0;
        struct enetc_bdr *rx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *adapter =
                        ENETC_DEV_PRIVATE(data->dev_private);
        uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

        PMD_INIT_FUNC_TRACE();
        if (nb_rx_desc > MAX_BD_COUNT)
                return -EINVAL;

        rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (rx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate RX ring memory");
                err = -ENOMEM;
                return err;
        }

        err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
        if (err)
                goto fail;

        rx_ring->index = rx_queue_id;
        rx_ring->ndev = dev;
        enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
        data->rx_queues[rx_queue_id] = rx_ring;

        if (!rx_conf->rx_deferred_start) {
                /* Enable ring */
                enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
                               ENETC_RBMR_EN);
                dev->data->rx_queue_state[rx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STARTED;
        } else {
                dev->data->rx_queue_state[rx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STOPPED;
        }

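        /* With KEEP_CRC the MAC leaves the 4-byte FCS on received
         * frames; remember its length so the RX path can account for
         * it when reporting packet sizes.
         */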
        rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
                                     ETHER_CRC_LEN : 0);

        return 0;
fail:
        rte_free(rx_ring);

        return err;
}

static void
enetc_rx_queue_release(void *rxq)
{
        if (rxq == NULL)
                return;

        struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
        struct enetc_swbd *q_swbd;
        struct enetc_hw *hw;
        uint32_t val;
        int i;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
        val &= (~ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

        /* Clean the ring */
        i = rx_ring->next_to_clean;
        q_swbd = &rx_ring->q_swbd[i];
        while (i != rx_ring->next_to_use) {
                rte_pktmbuf_free(q_swbd->buffer_addr);
                q_swbd->buffer_addr = NULL;
                q_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        q_swbd = &rx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(rx_ring);
        rte_free(rx_ring);
}

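/*
 * Statistics come from the port-level MAC counters, so they cover the
 * whole port rather than individual queues.
 */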
static int
enetc_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        /* Total received packets, bad + good; to count only good
         * received packets use the ENETC_PM0_RFRM and ENETC_PM0_TFRM
         * registers instead.
         */
        stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
        stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
        stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
        stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
        /* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
         * truncated packets.
         */
        stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
        stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
        stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

        return 0;
}

static void
enetc_stats_reset(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);
}

static void
enetc_dev_close(struct rte_eth_dev *dev)
{
        uint16_t i;

        PMD_INIT_FUNC_TRACE();
        enetc_dev_stop(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                enetc_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                enetc_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

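/*
 * Promiscuous and allmulticast control: the PSIPMR register carries
 * per-SI unicast (UP) and multicast (MP) promiscuous enable bits;
 * index 0 selects SI0.
 */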
static void
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Enable promiscuous mode for SI0 */
        psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static void
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        /* Disable promiscuous mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
        psipmr &= (~ENETC_PSIPMR_SET_UP(0));

        if (dev->data->all_multicast == 0)
                psipmr &= (~ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static void
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Enable allmulticast mode for SI0 */
        psipmr |= ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static void
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        if (dev->data->promiscuous == 1)
                return; /* must remain in all_multicast mode */

        /* Disable allmulticast mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
                               ~(ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

        /* Check that the MTU is within the allowed range */
        if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
                return -EINVAL;

        /*
         * Refuse an MTU that requires scattered-packet support when the
         * feature has not been enabled beforehand.
         */
        if (dev->data->min_rx_buf_size &&
                !dev->data->scattered_rx && frame_size >
                dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
                ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
                return -EINVAL;
        }

        if (frame_size > ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
        enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        /* Set the MTU */
        enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
                      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

        return 0;
}

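/*
 * Apply port-level RX offload configuration: program the maximum frame
 * length for jumbo frames and, when KEEP_CRC is requested, set the MAC
 * to keep the FCS on received frames instead of stripping it.
 */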
static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;

        PMD_INIT_FUNC_TRACE();

        if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                uint32_t max_len;

                max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

                enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
                              ENETC_SET_MAXFRM(max_len));
                enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
                              ENETC_MAC_MAXFRM_SIZE);
                enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
                              2 * ENETC_MAC_MAXFRM_SIZE);
                dev->data->mtu = ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;
        }

        if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
                int config;

                config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
                config |= ENETC_PM0_CRC;
                enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
        }

        return 0;
}

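/*
 * Per-queue start/stop hooks: toggle the ring enable bit in the BD ring
 * mode register and mirror the new state in rte_eth_dev_data.
 */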
static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *rx_ring;
        uint32_t rx_data;

        rx_ring = dev->data->rx_queues[qidx];
        if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
                rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
                                         ENETC_RBMR);
                rx_data = rx_data | ENETC_RBMR_EN;
                enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
                               rx_data);
                dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *rx_ring;
        uint32_t rx_data;

        rx_ring = dev->data->rx_queues[qidx];
        if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
                rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
                                         ENETC_RBMR);
                rx_data = rx_data & (~ENETC_RBMR_EN);
                enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
                               rx_data);
                dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *tx_ring;
        uint32_t tx_data;

        tx_ring = dev->data->tx_queues[qidx];
        if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
                tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
                                         ENETC_TBMR);
                tx_data = tx_data | ENETC_TBMR_EN;
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
                               tx_data);
                dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *tx_ring;
        uint32_t tx_data;

        tx_ring = dev->data->tx_queues[qidx];
        if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
                tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
                                         ENETC_TBMR);
                tx_data = tx_data & (~ENETC_TBMR_EN);
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
                               tx_data);
                dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
        .dev_configure        = enetc_dev_configure,
        .dev_start            = enetc_dev_start,
        .dev_stop             = enetc_dev_stop,
        .dev_close            = enetc_dev_close,
        .link_update          = enetc_link_update,
        .stats_get            = enetc_stats_get,
        .stats_reset          = enetc_stats_reset,
        .promiscuous_enable   = enetc_promiscuous_enable,
        .promiscuous_disable  = enetc_promiscuous_disable,
        .allmulticast_enable  = enetc_allmulticast_enable,
        .allmulticast_disable = enetc_allmulticast_disable,
        .dev_infos_get        = enetc_dev_infos_get,
        .mtu_set              = enetc_mtu_set,
        .rx_queue_setup       = enetc_rx_queue_setup,
        .rx_queue_start       = enetc_rx_queue_start,
        .rx_queue_stop        = enetc_rx_queue_stop,
        .rx_queue_release     = enetc_rx_queue_release,
        .tx_queue_setup       = enetc_tx_queue_setup,
        .tx_queue_start       = enetc_tx_queue_start,
        .tx_queue_stop        = enetc_tx_queue_stop,
        .tx_queue_release     = enetc_tx_queue_release,
        .dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        eth_dev->dev_ops = &enetc_ops;
        eth_dev->rx_pkt_burst = &enetc_recv_pkts;
        eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

        /* Retrieving and storing the HW base address of device */
        hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;

        error = enetc_hardware_init(hw);
        if (error != 0) {
                ENETC_PMD_ERR("Hardware initialization failed");
                return error;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth", ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
                              "store MAC addresses",
                              ETHER_ADDR_LEN);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Set MTU */
        enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
                      ENETC_SET_MAXFRM(ETHER_MAX_LEN));
        eth_dev->data->mtu = ETHER_MAX_LEN - ETHER_HDR_LEN - ETHER_CRC_LEN;

        ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
        return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct enetc_eth_adapter),
                                             enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
        .id_table = pci_id_enetc_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = enetc_pci_probe,
        .remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");

RTE_INIT(enetc_pmd_init_log)
{
        enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
        if (enetc_logtype_pmd >= 0)
                rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}