net/enetc: support VF
drivers/net/enetc/enetc_ethdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

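/*
 * Start the device: for the PF, enable MAC Tx/Rx and the port, and program
 * the RGMII/XGMII interface mode based on what the hardware reports. The VF
 * skips the port-level programming and returns early.
 */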
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        if (hw->device_id == ENETC_DEV_ID_VF)
                return 0;

        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

        /* Enable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

        /* Set auto-speed for RGMII */
        if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_RGAUTO);
        }
        if (enetc_global_rd(enetc_hw,
                            ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
                enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
                enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
                              ENETC_PM0_IFM_XGMII);
        }

        return 0;
}

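/*
 * Stop the device: mark it stopped, then (PF only) clear the port enable
 * and MAC Tx/Rx enable bits set in enetc_dev_start().
 */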
static int
enetc_dev_stop(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t val;

        PMD_INIT_FUNC_TRACE();
        dev->data->dev_started = 0;
        if (hw->device_id == ENETC_DEV_ID_VF)
                return 0;

        /* Disable port */
        val = enetc_port_rd(enetc_hw, ENETC_PMR);
        enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

        val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
        enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
                      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));

        return 0;
}

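/*
 * Packet types the Rx path can report; the list is terminated by
 * RTE_PTYPE_UNKNOWN as required by the ethdev API.
 */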
static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP,
                RTE_PTYPE_L4_ICMP,
                RTE_PTYPE_UNKNOWN
        };

        return ptypes;
}

/* Returns 0 if the link status changed, -1 if not changed
 * (the value propagated from rte_eth_linkstatus_set()).
 */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        struct rte_eth_link link;
        uint32_t status;

        PMD_INIT_FUNC_TRACE();

        memset(&link, 0, sizeof(link));

        status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

        if (status & ENETC_LINK_MODE)
                link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
        else
                link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;

        if (status & ENETC_LINK_STATUS)
                link.link_status = RTE_ETH_LINK_UP;
        else
                link.link_status = RTE_ETH_LINK_DOWN;

        switch (status & ENETC_LINK_SPEED_MASK) {
        case ENETC_LINK_SPEED_1G:
                link.link_speed = RTE_ETH_SPEED_NUM_1G;
                break;

        case ENETC_LINK_SPEED_100M:
                link.link_speed = RTE_ETH_SPEED_NUM_100M;
                break;

        default:
        case ENETC_LINK_SPEED_10M:
                link.link_speed = RTE_ETH_SPEED_NUM_10M;
        }

        return rte_eth_linkstatus_set(dev, &link);
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
        char buf[RTE_ETHER_ADDR_FMT_SIZE];

        rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
        ENETC_PMD_NOTICE("%s%s\n", name, buf);
}

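/*
 * One-time hardware init: map the port/global register windows, apply the
 * Rx FIFO erratum workaround, program coherent DMA attributes and enable
 * the station interface (SI). The primary MAC address is then read from
 * SIPMAR0/1 (VF) or PSIPMAR0/1(0) (PF); the registers hold the address as
 * a 32-bit word plus a 16-bit word, e.g. for aa:bb:cc:dd:ee:ff on the
 * little-endian CPU this code assumes:
 *
 *   PSIPMAR0 = 0xddccbbaa   (bytes 0-3)
 *   PSIPMAR1 = 0x0000ffee   (bytes 4-5)
 *
 * If both words read back as zero, a locally administered random address
 * is generated and programmed instead.
 */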
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t *mac = (uint32_t *)hw->mac.addr;
        uint32_t high_mac = 0;
        uint16_t low_mac = 0;

        PMD_INIT_FUNC_TRACE();
        /* Calculate and store the base HW addresses */
        hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
        hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

        /* Workaround for the Rx lock-up HW erratum */
        enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1);

        /* Set ENETC transaction flags to coherent, don't allocate.
         * BD writes merge with surrounding cache line data, frame data writes
         * overwrite cache line.
         */
        enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT);

        /* Enable the Station Interface */
        enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

        if (hw->device_id == ENETC_DEV_ID_VF) {
                *mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR0);
                high_mac = (uint32_t)*mac;
                mac++;
                *mac = (uint32_t)enetc_rd(enetc_hw, ENETC_SIPMAR1);
                low_mac = (uint16_t)*mac;
        } else {
                *mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
                high_mac = (uint32_t)*mac;
                mac++;
                *mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
                low_mac = (uint16_t)*mac;
        }

        if ((high_mac | low_mac) == 0) {
                char *first_byte;

                ENETC_PMD_NOTICE("MAC is not available for this SI, "
                                "setting a random MAC\n");
                mac = (uint32_t *)hw->mac.addr;
                *mac = (uint32_t)rte_rand();
                first_byte = (char *)mac;
                *first_byte &= 0xfe;    /* clear multicast bit */
                *first_byte |= 0x02;    /* set local assignment bit (IEEE802) */

                enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
                mac++;
                *mac = (uint16_t)rte_rand();
                enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
                print_ethaddr("New address: ",
                              (const struct rte_ether_addr *)hw->mac.addr);
        }

        return 0;
}

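/*
 * Report device capabilities: fixed descriptor-count limits for both
 * directions, the queue counts, the maximum frame length and the supported
 * Rx offloads.
 */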
static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                    struct rte_eth_dev_info *dev_info)
{
        PMD_INIT_FUNC_TRACE();
        dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
                .nb_max = MAX_BD_COUNT,
                .nb_min = MIN_BD_COUNT,
                .nb_align = BD_ALIGN,
        };
        dev_info->max_rx_queues = MAX_RX_RINGS;
        dev_info->max_tx_queues = MAX_TX_RINGS;
        dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
        dev_info->rx_offload_capa =
                (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
                 RTE_ETH_RX_OFFLOAD_KEEP_CRC);

        return 0;
}

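/*
 * A BD ring is allocated as two parallel arrays of nb_desc entries: the
 * software ring (struct enetc_swbd, tracking the mbuf attached to each
 * descriptor) and the hardware descriptor ring itself.
 */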
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
        int size;

        size = nb_desc * sizeof(struct enetc_swbd);
        txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (txr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_desc * sizeof(struct enetc_tx_bd);
        txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (txr->bd_base == NULL) {
                rte_free(txr->q_swbd);
                txr->q_swbd = NULL;
                return -ENOMEM;
        }

        txr->bd_count = nb_desc;
        txr->next_to_clean = 0;
        txr->next_to_use = 0;

        return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
        rte_free(rxr->q_swbd);
        rte_free(rxr->bd_base);
        rxr->q_swbd = NULL;
        rxr->bd_base = NULL;
}

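/*
 * Program a Tx BD ring into the hardware. The ring's IOVA is split across
 * the two 32-bit base address registers, e.g. for bd_address 0x123456000:
 *
 *   TBBAR0 = lower_32_bits(bd_address) = 0x23456000
 *   TBBAR1 = upper_32_bits(bd_address) = 0x00000001
 *
 * The consumer index registers are also cached as CPU pointers so the Tx
 * path can access them without recomputing the register offset.
 */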
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
        int idx = tx_ring->index;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)tx_ring->bd_base);
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
                       ENETC_RTBLENR_LEN(tx_ring->bd_count));

        enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
        enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
        tx_ring->tcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(TX, idx, ENETC_TBCIR));
        tx_ring->tcisr = (void *)((size_t)hw->reg +
                         ENETC_BDR(TX, idx, ENETC_TBCISR));
}

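/*
 * Set up one Tx queue: allocate the BD ring, program it into the hardware
 * and enable it immediately unless the application requested deferred start
 * (tx_conf->tx_deferred_start), in which case it stays stopped until
 * rte_eth_dev_tx_queue_start() is called.
 */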
static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t queue_idx,
                     uint16_t nb_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_txconf *tx_conf)
{
        int err = 0;
        struct enetc_bdr *tx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(data->dev_private);

        PMD_INIT_FUNC_TRACE();
        if (nb_desc > MAX_BD_COUNT)
                return -EINVAL;

        tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (tx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate TX ring memory");
                return -ENOMEM;
        }

        err = enetc_alloc_txbdr(tx_ring, nb_desc);
        if (err)
                goto fail;

        tx_ring->index = queue_idx;
        tx_ring->ndev = dev;
        enetc_setup_txbdr(&priv->hw.hw, tx_ring);
        data->tx_queues[queue_idx] = tx_ring;

        if (!tx_conf->tx_deferred_start) {
                /* Enable the ring */
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
                               ENETC_TBMR, ENETC_TBMR_EN);
                dev->data->tx_queue_state[tx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STARTED;
        } else {
                dev->data->tx_queue_state[tx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
fail:
        rte_free(tx_ring);

        return err;
}

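/*
 * Release a Tx queue: disable the ring in hardware, free any mbufs still
 * attached to software BDs (walking from next_to_clean until an empty slot
 * is found), then free the rings themselves.
 */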
static void
enetc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        void *txq = dev->data->tx_queues[qid];

        if (txq == NULL)
                return;

        struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
        struct enetc_hw *hw;
        struct enetc_swbd *tx_swbd;
        int i;
        uint32_t val;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
        val &= (~ENETC_TBMR_EN);
        enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

        /* Clean the ring */
        i = tx_ring->next_to_clean;
        tx_swbd = &tx_ring->q_swbd[i];
        while (tx_swbd->buffer_addr != NULL) {
                rte_pktmbuf_free(tx_swbd->buffer_addr);
                tx_swbd->buffer_addr = NULL;
                tx_swbd++;
                i++;
                if (unlikely(i == tx_ring->bd_count)) {
                        i = 0;
                        tx_swbd = &tx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(tx_ring);
        rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
                  uint16_t nb_rx_desc)
{
        int size;

        size = nb_rx_desc * sizeof(struct enetc_swbd);
        rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (rxr->q_swbd == NULL)
                return -ENOMEM;

        size = nb_rx_desc * sizeof(union enetc_rx_bd);
        rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
        if (rxr->bd_base == NULL) {
                rte_free(rxr->q_swbd);
                rxr->q_swbd = NULL;
                return -ENOMEM;
        }

        rxr->bd_count = nb_rx_desc;
        rxr->next_to_clean = 0;
        rxr->next_to_use = 0;
        rxr->next_to_alloc = 0;

        return 0;
}

static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
                  struct rte_mempool *mb_pool)
{
        int idx = rx_ring->index;
        uint16_t buf_size;
        phys_addr_t bd_address;

        bd_address = (phys_addr_t)
                     rte_mem_virt2iova((const void *)rx_ring->bd_base);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
                       lower_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
                       upper_32_bits((uint64_t)bd_address));
        enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
                       ENETC_RTBLENR_LEN(rx_ring->bd_count));

        rx_ring->mb_pool = mb_pool;
        rx_ring->rcir = (void *)((size_t)hw->reg +
                        ENETC_BDR(RX, idx, ENETC_RBCIR));
        enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
                   RTE_PKTMBUF_HEADROOM);
        enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
        enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

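/*
 * Set up one Rx queue: allocate and program the BD ring, pre-fill it with
 * mbufs from mb_pool, honour deferred start, and record the CRC length in
 * crc_len when RTE_ETH_RX_OFFLOAD_KEEP_CRC is requested so the Rx path can
 * adjust the reported packet length accordingly.
 */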
static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
                     uint16_t rx_queue_id,
                     uint16_t nb_rx_desc,
                     unsigned int socket_id __rte_unused,
                     const struct rte_eth_rxconf *rx_conf,
                     struct rte_mempool *mb_pool)
{
        int err = 0;
        struct enetc_bdr *rx_ring;
        struct rte_eth_dev_data *data = dev->data;
        struct enetc_eth_adapter *adapter =
                        ENETC_DEV_PRIVATE(data->dev_private);
        uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

        PMD_INIT_FUNC_TRACE();
        if (nb_rx_desc > MAX_BD_COUNT)
                return -EINVAL;

        rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
        if (rx_ring == NULL) {
                ENETC_PMD_ERR("Failed to allocate RX ring memory");
                err = -ENOMEM;
                return err;
        }

        err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
        if (err)
                goto fail;

        rx_ring->index = rx_queue_id;
        rx_ring->ndev = dev;
        enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
        data->rx_queues[rx_queue_id] = rx_ring;

        if (!rx_conf->rx_deferred_start) {
                /* Enable the ring */
                enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
                               ENETC_RBMR_EN);
                dev->data->rx_queue_state[rx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STARTED;
        } else {
                dev->data->rx_queue_state[rx_ring->index] =
                               RTE_ETH_QUEUE_STATE_STOPPED;
        }

        rx_ring->crc_len = (uint8_t)((rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) ?
                                     RTE_ETHER_CRC_LEN : 0);

        return 0;
fail:
        rte_free(rx_ring);

        return err;
}

static void
enetc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        void *rxq = dev->data->rx_queues[qid];

        if (rxq == NULL)
                return;

        struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
        struct enetc_eth_hw *eth_hw =
                ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
        struct enetc_swbd *q_swbd;
        struct enetc_hw *hw;
        uint32_t val;
        int i;

        /* Disable the ring */
        hw = &eth_hw->hw;
        val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
        val &= (~ENETC_RBMR_EN);
        enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

        /* Clean the ring */
        i = rx_ring->next_to_clean;
        q_swbd = &rx_ring->q_swbd[i];
        while (i != rx_ring->next_to_use) {
                rte_pktmbuf_free(q_swbd->buffer_addr);
                q_swbd->buffer_addr = NULL;
                q_swbd++;
                i++;
                if (unlikely(i == rx_ring->bd_count)) {
                        i = 0;
                        q_swbd = &rx_ring->q_swbd[i];
                }
        }

        enetc_free_bdr(rx_ring);
        rte_free(rx_ring);
}

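/*
 * Basic statistics are read from the port-level MAC counters, so they
 * cover the whole port rather than individual queues.
 */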
static int
enetc_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        /* Total received packets, bad + good; to count only good received
         * packets, use the ENETC_PM0_RFRM and ENETC_PM0_TFRM registers
         * instead.
         */
        stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
        stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
        stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
        stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
        /* Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
         * truncated packets.
         */
        stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
        stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
        stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

        return 0;
}

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;

        enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

        return 0;
}

static int
enetc_dev_close(struct rte_eth_dev *dev)
{
        uint16_t i;
        int ret;

        PMD_INIT_FUNC_TRACE();
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        ret = enetc_dev_stop(dev);

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                enetc_rx_queue_release(dev, i);
                dev->data->rx_queues[i] = NULL;
        }
        dev->data->nb_rx_queues = 0;

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                enetc_tx_queue_release(dev, i);
                dev->data->tx_queues[i] = NULL;
        }
        dev->data->nb_tx_queues = 0;

        if (rte_eal_iova_mode() == RTE_IOVA_PA)
                dpaax_iova_table_depopulate();

        return ret;
}

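/*
 * Promiscuous/allmulticast control goes through the PSIPMR register:
 * ENETC_PSIPMR_SET_UP(0) enables unicast promiscuous mode and
 * ENETC_PSIPMR_SET_MP(0) enables multicast promiscuous mode for station
 * interface 0. The two modes interact: disabling one leaves the other's
 * bit alone when it is still required.
 */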
static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Enable promiscuous mode for SI0 */
        psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        /* Disable promiscuous mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
        psipmr &= (~ENETC_PSIPMR_SET_UP(0));

        if (dev->data->all_multicast == 0)
                psipmr &= (~ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

        /* Enable allmulticast mode for SI0 */
        psipmr |= ENETC_PSIPMR_SET_MP(0);

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t psipmr = 0;

        if (dev->data->promiscuous == 1)
                return 0; /* must remain in all_multicast mode */

        /* Disable allmulticast mode for SI0 */
        psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
                               ~(ENETC_PSIPMR_SET_MP(0));

        enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

        return 0;
}

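/*
 * Set the MTU. The wire frame size is the MTU plus the Ethernet header and
 * CRC, e.g. for the standard MTU of 1500:
 *
 *   frame_size = 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 1518
 *
 * The request is refused when the frame would not fit into a single Rx
 * buffer and scattered Rx has not been enabled.
 */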
static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

        /*
         * Refuse an MTU that requires scattered-packet support when that
         * feature has not been enabled beforehand.
         */
        if (dev->data->min_rx_buf_size &&
                !dev->data->scattered_rx && frame_size >
                dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
                ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
                return -EINVAL;
        }

        enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
        enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

        /* Set the MTU */
        enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
                      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

        return 0;
}

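/*
 * Apply the device configuration: program the maximum frame length from the
 * configured MTU, optionally keep the CRC on received frames, and set up Rx
 * checksum handling. The checksum word starts with both L3_CKSUM and
 * L4_CKSUM set, the flag is cleared for each layer whose Rx checksum
 * offload is requested, and the result is written to ENETC_PAR_PORT_CFG.
 */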
static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct enetc_hw *enetc_hw = &hw->hw;
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;
        uint32_t checksum = L3_CKSUM | L4_CKSUM;
        uint32_t max_len;

        PMD_INIT_FUNC_TRACE();

        max_len = dev->data->dev_conf.rxmode.mtu + RTE_ETHER_HDR_LEN +
                RTE_ETHER_CRC_LEN;
        enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(max_len));
        enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
        enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

        if (rx_offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
                int config;

                config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
                config |= ENETC_PM0_CRC;
                enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
        }

        if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
                checksum &= ~L3_CKSUM;

        if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
                checksum &= ~L4_CKSUM;

        enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

        return 0;
}

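/*
 * Per-queue start/stop hooks: each toggles the enable bit (ENETC_RBMR_EN or
 * ENETC_TBMR_EN) in the ring's mode register via read-modify-write and
 * keeps the ethdev queue state in sync, so deferred-start queues can be
 * brought up and torn down individually at runtime.
 */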
static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *rx_ring;
        uint32_t rx_data;

        rx_ring = dev->data->rx_queues[qidx];
        if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
                rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
                                         ENETC_RBMR);
                rx_data = rx_data | ENETC_RBMR_EN;
                enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
                               rx_data);
                dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *rx_ring;
        uint32_t rx_data;

        rx_ring = dev->data->rx_queues[qidx];
        if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
                rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
                                         ENETC_RBMR);
                rx_data = rx_data & (~ENETC_RBMR_EN);
                enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
                               rx_data);
                dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *tx_ring;
        uint32_t tx_data;

        tx_ring = dev->data->tx_queues[qidx];
        if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
                tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
                                         ENETC_TBMR);
                tx_data = tx_data | ENETC_TBMR_EN;
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
                               tx_data);
                dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
        }

        return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
        struct enetc_eth_adapter *priv =
                        ENETC_DEV_PRIVATE(dev->data->dev_private);
        struct enetc_bdr *tx_ring;
        uint32_t tx_data;

        tx_ring = dev->data->tx_queues[qidx];
        if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
                tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
                                         ENETC_TBMR);
                tx_data = tx_data & (~ENETC_TBMR_EN);
                enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
                               tx_data);
                dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
        }

        return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
        { RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
        { .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
        .dev_configure        = enetc_dev_configure,
        .dev_start            = enetc_dev_start,
        .dev_stop             = enetc_dev_stop,
        .dev_close            = enetc_dev_close,
        .link_update          = enetc_link_update,
        .stats_get            = enetc_stats_get,
        .stats_reset          = enetc_stats_reset,
        .promiscuous_enable   = enetc_promiscuous_enable,
        .promiscuous_disable  = enetc_promiscuous_disable,
        .allmulticast_enable  = enetc_allmulticast_enable,
        .allmulticast_disable = enetc_allmulticast_disable,
        .dev_infos_get        = enetc_dev_infos_get,
        .mtu_set              = enetc_mtu_set,
        .rx_queue_setup       = enetc_rx_queue_setup,
        .rx_queue_start       = enetc_rx_queue_start,
        .rx_queue_stop        = enetc_rx_queue_stop,
        .rx_queue_release     = enetc_rx_queue_release,
        .tx_queue_setup       = enetc_tx_queue_setup,
        .tx_queue_start       = enetc_tx_queue_start,
        .tx_queue_stop        = enetc_tx_queue_stop,
        .tx_queue_release     = enetc_tx_queue_release,
        .dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
        int error = 0;
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        struct enetc_eth_hw *hw =
                ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

        PMD_INIT_FUNC_TRACE();
        eth_dev->dev_ops = &enetc_ops;
        eth_dev->rx_pkt_burst = &enetc_recv_pkts;
        eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

        /* Retrieve and store the HW base address of the device */
        hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
        hw->device_id = pci_dev->id.device_id;

        error = enetc_hardware_init(hw);
        if (error != 0) {
                ENETC_PMD_ERR("Hardware initialization failed");
                return error;
        }

        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
                                        RTE_ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
                              "store MAC addresses",
                              RTE_ETHER_ADDR_LEN * 1);
                return -ENOMEM;
        }

        /* Copy the permanent MAC address */
        rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
                        &eth_dev->data->mac_addrs[0]);

        /* Set MTU */
        enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
                      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
        eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
                RTE_ETHER_CRC_LEN;

        if (rte_eal_iova_mode() == RTE_IOVA_PA)
                dpaax_iova_table_populate();

        ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
        return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();

        return enetc_dev_close(eth_dev);
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                                             sizeof(struct enetc_eth_adapter),
                                             enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
        .id_table = pci_id_enetc_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = enetc_pci_probe,
        .remove = enetc_pci_remove,
};

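/*
 * Driver registration. The kmod dependency below means the device must be
 * bound to vfio-pci before the PMD can claim it. A typical invocation
 * (illustrative only; the PCI address 0000:00:00.0 is a placeholder):
 *
 *   dpdk-devbind.py --bind=vfio-pci 0000:00:00.0
 *   dpdk-testpmd -a 0000:00:00.0 -- -i
 */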
RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");
RTE_LOG_REGISTER_DEFAULT(enetc_logtype_pmd, NOTICE);