net: add rte prefix to ether defines
drivers/net/enetc/enetc_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2019 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;

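/**
 * Enable the MAC Tx/Rx data paths and the port, then select the
 * interface mode (RGMII auto-speed or XGMII) based on the mode the
 * hardware reports.
 */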
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

        /* Enable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

        /* Set auto-speed for RGMII */
        if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
        }
        if (enetc_global_rd(enetc_hw,
                            ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
        }

        return 0;
}

static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        /* Disable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_UNKNOWN
        };

        return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        struct rte_eth_link link;
        uint32_t status;

        PMD_INIT_FUNC_TRACE();

        memset(&link, 0, sizeof(link));

        status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

        if (status & ENETC_LINK_MODE)
                link.link_duplex = ETH_LINK_FULL_DUPLEX;
        else
                link.link_duplex = ETH_LINK_HALF_DUPLEX;

        if (status & ENETC_LINK_STATUS)
                link.link_status = ETH_LINK_UP;
        else
                link.link_status = ETH_LINK_DOWN;

        switch (status & ENETC_LINK_SPEED_MASK) {
        case ENETC_LINK_SPEED_1G:
                link.link_speed = ETH_SPEED_NUM_1G;
                break;

        case ENETC_LINK_SPEED_100M:
                link.link_speed = ETH_SPEED_NUM_100M;
                break;

        default:
        case ENETC_LINK_SPEED_10M:
                link.link_speed = ETH_SPEED_NUM_10M;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

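/**
 * One-time hardware setup: derive the port and global register
 * windows from the BAR mapping, enable the station interface, and
 * read the primary MAC address out of the PSIPMAR registers.
 */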
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t *mac = (uint32_t *)hw->mac.addr;

        PMD_INIT_FUNC_TRACE();
        /* Calculating and storing the base HW addresses */
        hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
        hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

        /* Enabling Station Interface */
        enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

        *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
        mac++;
        *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));

        return 0;
}

static void
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                    struct rte_eth_dev_info *dev_info)
{
        PMD_INIT_FUNC_TRACE();
        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->max_rx_queues = MAX_RX_RINGS;
        dev_info->max_tx_queues = MAX_TX_RINGS;
        dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
        dev_info->rx_offload_capa =
                (DEV_RX_OFFLOAD_IPV4_CKSUM |
                 DEV_RX_OFFLOAD_UDP_CKSUM |
                 DEV_RX_OFFLOAD_TCP_CKSUM |
                 DEV_RX_OFFLOAD_KEEP_CRC |
                 DEV_RX_OFFLOAD_JUMBO_FRAME);
}

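/**
 * Allocate the software and hardware descriptor arrays for a Tx BD
 * ring and reset its ring indices. Returns -ENOMEM and frees any
 * partial allocation on failure.
 */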
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
        int size;

        size = nb_desc * sizeof(struct enetc_swbd);
        txr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_desc * sizeof(struct enetc_tx_bd);
        txr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (txr->bd_base == NULL) {
                rte_free(txr->q_swbd);
                txr->q_swbd = NULL;
                return -ENOMEM;
        }

        txr->bd_count = nb_desc;
        txr->next_to_clean = 0;
        txr->next_to_use = 0;

        return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
        rte_free(rxr->q_swbd);
        rte_free(rxr->bd_base);
        rxr->q_swbd = NULL;
        rxr->bd_base = NULL;
}

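/**
 * Program a Tx BD ring into the hardware: write the IOVA of the BD
 * array and the ring length, zero the index registers, and cache the
 * ENETC_TBCIR/ENETC_TBCISR register addresses used by the Tx fast
 * path.
 */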
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
        int idx = tx_ring->index;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)tx_ring->bd_base);
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
                       ENETC_RTBLENR_LEN(tx_ring->bd_count));

        enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
        enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
        tx_ring->tcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(TX, idx, ENETC_TBCIR));
        tx_ring->tcisr = (void *)((size_t)hw->reg +
                         ENETC_BDR(TX, idx, ENETC_TBCISR));
}

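/**
 * ethdev tx_queue_setup hook: allocate a BD ring, program it into the
 * hardware and, unless deferred start was requested, enable it right
 * away.
 */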
static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t queue_idx,
                     uint16_t nb_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_txconf *tx_conf)
{
        int err = 0;
        struct enetc_bdr *tx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(data->dev_private);

        PMD_INIT_FUNC_TRACE();
        if (nb_desc > MAX_BD_COUNT)
                return -1;

        tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (tx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate TX ring memory");
                err = -ENOMEM;
                return err;
        }

        err = enetc_alloc_txbdr(tx_ring, nb_desc);
        if (err)
                goto fail;

        tx_ring->index = queue_idx;
        tx_ring->ndev = dev;
        enetc_setup_txbdr(&priv->hw.hw, tx_ring);
        data->tx_queues[queue_idx] = tx_ring;

        if (!tx_conf->tx_deferred_start) {
                /* Enable ring */
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
                               ENETC_TBMR, ENETC_TBMR_EN);
                dev->data->tx_queue_state[tx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STARTED;
        } else {
                dev->data->tx_queue_state[tx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
fail:
        rte_free(tx_ring);

        return err;
}

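/**
 * ethdev tx_queue_release hook: disable the ring in hardware, free any
 * mbufs still referenced by software BDs, then release the descriptor
 * arrays and the ring structure itself.
 */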
static void
enetc_tx_queue_release(void *txq)
{
        if (txq == NULL)
                return;

        struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
        struct enetc_hw *hw;
        struct enetc_swbd *tx_swbd;
        int i;
        uint32_t val;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
        val &= (~ENETC_TBMR_EN);
        enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

        /* Clean the ring */
        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->q_swbd[i];
        while (tx_swbd->buffer_addr != NULL) {
                rte_pktmbuf_free(tx_swbd->buffer_addr);
                tx_swbd->buffer_addr = NULL;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = &tx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(tx_ring);
        rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
                  uint16_t nb_rx_desc)
{
        int size;

        size = nb_rx_desc * sizeof(struct enetc_swbd);
        rxr->q_swbd = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_rx_desc * sizeof(union enetc_rx_bd);
        rxr->bd_base = rte_malloc(NULL, size, RTE_CACHE_LINE_SIZE);
        if (rxr->bd_base == NULL) {
                rte_free(rxr->q_swbd);
                rxr->q_swbd = NULL;
                return -ENOMEM;
        }

        rxr->bd_count = nb_rx_desc;
        rxr->next_to_clean = 0;
        rxr->next_to_use = 0;
        rxr->next_to_alloc = 0;

        return 0;
}

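/**
 * Program an Rx BD ring into the hardware: write the IOVA and length
 * of the BD array, attach the mbuf pool, pre-fill the ring with
 * buffers, and set the hardware buffer size from the pool's data room
 * (minus headroom).
 */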
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
                  struct rte_mempool *mb_pool)
{
        int idx = rx_ring->index;
        uint16_t buf_size;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)rx_ring->bd_base);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
                       ENETC_RTBLENR_LEN(rx_ring->bd_count));

        rx_ring->mb_pool = mb_pool;
        rx_ring->rcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(RX, idx, ENETC_RBCIR));
        enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
                   RTE_PKTMBUF_HEADROOM);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

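/**
 * ethdev rx_queue_setup hook: the Rx counterpart of
 * enetc_tx_queue_setup(). Additionally records the CRC length for the
 * Rx path when the KEEP_CRC offload is enabled.
 */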
static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t rx_queue_id,
                     uint16_t nb_rx_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_rxconf *rx_conf,
                     struct rte_mempool *mb_pool)
{
        int err = 0;
        struct enetc_bdr *rx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *adapter =
                        ENETC_DEV_PRIVATE(data->dev_private);
        uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

        PMD_INIT_FUNC_TRACE();
        if (nb_rx_desc > MAX_BD_COUNT)
                return -1;

        rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (rx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate RX ring memory");
                err = -ENOMEM;
                return err;
        }

        err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
        if (err)
                goto fail;

        rx_ring->index = rx_queue_id;
        rx_ring->ndev = dev;
        enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
        data->rx_queues[rx_queue_id] = rx_ring;

        if (!rx_conf->rx_deferred_start) {
                /* Enable ring */
                enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
                               ENETC_RBMR_EN);
                dev->data->rx_queue_state[rx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STARTED;
        } else {
                dev->data->rx_queue_state[rx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STOPPED;
        }

        rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
                                     RTE_ETHER_CRC_LEN : 0);

        return 0;
fail:
        rte_free(rx_ring);

        return err;
}

static void
enetc_rx_queue_release(void *rxq)
{
        if (rxq == NULL)
                return;

        struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
        struct enetc_swbd *q_swbd;
        struct enetc_hw *hw;
        uint32_t val;
        int i;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
        val &= (~ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

        /* Clean the ring */
        i = rx_ring->next_to_clean;
        q_swbd = &rx_ring->q_swbd[i];
        while (i != rx_ring->next_to_use) {
                rte_pktmbuf_free(q_swbd->buffer_addr);
                q_swbd->buffer_addr = NULL;
                q_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        q_swbd = &rx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(rx_ring);
        rte_free(rx_ring);
}

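/**
 * ethdev stats_get hook: fill in the basic packet, byte, drop and
 * error counters from the port MAC statistics registers.
 */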
static int
enetc_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        /* Total received packets, bad + good. To count only good
         * received packets, use the ENETC_PM0_RFRM and ENETC_PM0_TFRM
         * registers instead.
         */
        stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
        stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
        stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
        stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
        /* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
         * truncated packets.
         */
        stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
        stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
        stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

        return 0;
}

static void
enetc_stats_reset(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);
}

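/**
 * ethdev dev_close hook: stop the device and release every Rx and Tx
 * queue that was set up.
 */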
static void
enetc_dev_close(struct rte_eth_dev *dev)
{
        uint16_t i;

        PMD_INIT_FUNC_TRACE();
        enetc_dev_stop(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                enetc_rx_queue_release(dev->data->rx_queues[i]);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                enetc_tx_queue_release(dev->data->tx_queues[i]);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;
}

static void
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Enable promiscuous mode for SI0 */
        psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static void
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        /* Disable promiscuous mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
        psipmr &= (~ENETC_PSIPMR_SET_UP(0));

        if (dev->data->all_multicast == 0)
                psipmr &= (~ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static void
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Enable all-multicast mode for SI0 */
        psipmr |= ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

static void
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        if (dev->data->promiscuous == 1)
                return; /* must remain in all_multicast mode */

        /* Disable all-multicast mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
                               ~(ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);
}

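/**
 * ethdev mtu_set hook: validate the requested MTU against the MAC
 * frame-size limits and the Rx buffer size, update the jumbo-frame
 * offload flag, and program the new maximum frame size.
 */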
static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        /* Check that the MTU is within the allowed range */
        if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
                return -EINVAL;

        /*
         * Refuse an MTU that requires scattered-packet support when
         * that feature has not been enabled beforehand.
         */
        if (dev->data->min_rx_buf_size &&
                !dev->data->scattered_rx && frame_size >
                dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
                ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
                return -EINVAL;
        }
        if (frame_size > RTE_ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;

        enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
        enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

        /* Set the new maximum frame size */
        enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
                      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

        return 0;
}

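/**
 * ethdev dev_configure hook: apply port-level Rx offloads (jumbo
 * frames, CRC retention) and write the resulting L3/L4 checksum
 * configuration to ENETC_PAR_PORT_CFG.
 */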
static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;
        uint32_t checksum = L3_CKSUM | L4_CKSUM;

        PMD_INIT_FUNC_TRACE();

        if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                uint32_t max_len;

                max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

                enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
                              ENETC_SET_MAXFRM(max_len));
                enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
                              ENETC_MAC_MAXFRM_SIZE);
                enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
                              2 * ENETC_MAC_MAXFRM_SIZE);
                dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
                        RTE_ETHER_CRC_LEN;
        }

        if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
                int config;

                config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
                config |= ENETC_PM0_CRC;
                enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
        }

        if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
                checksum &= ~L3_CKSUM;

        if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
                checksum &= ~L4_CKSUM;

        enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

        return 0;
}

static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *rx_ring;
        uint32_t rx_data;

        rx_ring = dev->data->rx_queues[qidx];
        if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
                rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
                                         ENETC_RBMR);
                rx_data = rx_data | ENETC_RBMR_EN;
                enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
                               rx_data);
                dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *rx_ring;
        uint32_t rx_data;

        rx_ring = dev->data->rx_queues[qidx];
        if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
                rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
                                         ENETC_RBMR);
                rx_data = rx_data & (~ENETC_RBMR_EN);
                enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
                               rx_data);
                dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *tx_ring;
        uint32_t tx_data;

        tx_ring = dev->data->tx_queues[qidx];
        if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
                tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
                                         ENETC_TBMR);
                tx_data = tx_data | ENETC_TBMR_EN;
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
                               tx_data);
                dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *tx_ring;
        uint32_t tx_data;

        tx_ring = dev->data->tx_queues[qidx];
        if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
                tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
                                         ENETC_TBMR);
                tx_data = tx_data & (~ENETC_TBMR_EN);
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
                               tx_data);
                dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
        .dev_configure        = enetc_dev_configure,
        .dev_start            = enetc_dev_start,
        .dev_stop             = enetc_dev_stop,
        .dev_close            = enetc_dev_close,
        .link_update          = enetc_link_update,
        .stats_get            = enetc_stats_get,
        .stats_reset          = enetc_stats_reset,
        .promiscuous_enable   = enetc_promiscuous_enable,
        .promiscuous_disable  = enetc_promiscuous_disable,
        .allmulticast_enable  = enetc_allmulticast_enable,
        .allmulticast_disable = enetc_allmulticast_disable,
        .dev_infos_get        = enetc_dev_infos_get,
        .mtu_set              = enetc_mtu_set,
        .rx_queue_setup       = enetc_rx_queue_setup,
        .rx_queue_start       = enetc_rx_queue_start,
        .rx_queue_stop        = enetc_rx_queue_stop,
        .rx_queue_release     = enetc_rx_queue_release,
        .tx_queue_setup       = enetc_tx_queue_setup,
        .tx_queue_start       = enetc_tx_queue_start,
        .tx_queue_stop        = enetc_tx_queue_stop,
        .tx_queue_release     = enetc_tx_queue_release,
        .dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        eth_dev->dev_ops = &enetc_ops;
        eth_dev->rx_pkt_burst = &enetc_recv_pkts;
        eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

        /* Retrieving and storing the HW base address of device */
        hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;

        error = enetc_hardware_init(hw);
        if (error != 0) {
                ENETC_PMD_ERR("Hardware initialization failed");
                return -1;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
                                        RTE_ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
                              "store MAC addresses",
                              RTE_ETHER_ADDR_LEN * 1);
                error = -ENOMEM;
                return error;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Set MTU */
        enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
                      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
        eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
                RTE_ETHER_CRC_LEN;

        ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
        return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct enetc_eth_adapter),
                                             enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
        .id_table = pci_id_enetc_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = enetc_pci_probe,
        .remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");

RTE_INIT(enetc_pmd_init_log)
{
        enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
        if (enetc_logtype_pmd >= 0)
                rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}