/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2020 NXP
 */

#include <stdbool.h>
#include <rte_ethdev_pci.h>
#include <rte_random.h>
#include <dpaax_iova_table.h>

#include "enetc_logs.h"
#include "enetc.h"

int enetc_logtype_pmd;

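/*
 * Start the device: enable MAC Tx/Rx and the port, then program the
 * interface mode (RGMII auto-speed or XGMII) based on the current port
 * configuration.
 */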
static int
enetc_dev_start(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);

	/* Enable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val | ENETC_PMR_EN);

	/* set auto-speed for RGMII */
	if (enetc_port_rd(enetc_hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_RGAUTO);
	}
	if (enetc_global_rd(enetc_hw,
			    ENETC_G_EPFBLPR(1)) == ENETC_G_EPFBLPR1_XGMII) {
		enetc_port_wr(enetc_hw, ENETC_PM0_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
		enetc_port_wr(enetc_hw, ENETC_PM1_IF_MODE,
			      ENETC_PM0_IFM_XGMII);
	}

	return 0;
}

static void
enetc_dev_stop(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t val;

	PMD_INIT_FUNC_TRACE();
	/* Disable port */
	val = enetc_port_rd(enetc_hw, ENETC_PMR);
	enetc_port_wr(enetc_hw, ENETC_PMR, val & (~ENETC_PMR_EN));

	val = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
	enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG,
		      val & (~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN)));
}

static const uint32_t *
enetc_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_L4_ICMP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/* return 0 means link status changed, -1 means not changed */
static int
enetc_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_link link;
	uint32_t status;

	PMD_INIT_FUNC_TRACE();

	memset(&link, 0, sizeof(link));

	status = enetc_port_rd(enetc_hw, ENETC_PM0_STATUS);

	if (status & ENETC_LINK_MODE)
		link.link_duplex = ETH_LINK_FULL_DUPLEX;
	else
		link.link_duplex = ETH_LINK_HALF_DUPLEX;

	if (status & ENETC_LINK_STATUS)
		link.link_status = ETH_LINK_UP;
	else
		link.link_status = ETH_LINK_DOWN;

	switch (status & ENETC_LINK_SPEED_MASK) {
	case ENETC_LINK_SPEED_1G:
		link.link_speed = ETH_SPEED_NUM_1G;
		break;

	case ENETC_LINK_SPEED_100M:
		link.link_speed = ETH_SPEED_NUM_100M;
		break;

	default:
	case ENETC_LINK_SPEED_10M:
		link.link_speed = ETH_SPEED_NUM_10M;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	ENETC_PMD_NOTICE("%s%s\n", name, buf);
}

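/*
 * One-time hardware setup: derive the port and global register base
 * addresses, apply the Rx lock-up erratum workaround, set coherent DMA
 * transaction attributes, enable the station interface, and read the
 * primary MAC address, generating a random locally administered one if
 * the SI has none.
 */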
static int
enetc_hardware_init(struct enetc_eth_hw *hw)
{
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t *mac = (uint32_t *)hw->mac.addr;
	uint32_t high_mac = 0;
	uint16_t low_mac = 0;

	PMD_INIT_FUNC_TRACE();
	/* Calculating and storing the base HW addresses */
	hw->hw.port = (void *)((size_t)hw->hw.reg + ENETC_PORT_BASE);
	hw->hw.global = (void *)((size_t)hw->hw.reg + ENETC_GLOBAL_BASE);

	/* WA for Rx lock-up HW erratum */
	enetc_port_wr(enetc_hw, ENETC_PM0_RX_FIFO, 1);

	/* set ENETC transaction flags to coherent, don't allocate.
	 * BD writes merge with surrounding cache line data, frame data writes
	 * overwrite cache line.
	 */
	enetc_wr(enetc_hw, ENETC_SICAR0, ENETC_SICAR0_COHERENT);

	/* Enabling Station Interface */
	enetc_wr(enetc_hw, ENETC_SIMR, ENETC_SIMR_EN);

	*mac = (uint32_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR0(0));
	high_mac = (uint32_t)*mac;
	mac++;
	*mac = (uint16_t)enetc_port_rd(enetc_hw, ENETC_PSIPMAR1(0));
	low_mac = (uint16_t)*mac;

	if ((high_mac | low_mac) == 0) {
		char *first_byte;

		ENETC_PMD_NOTICE("MAC is not available for this SI, "
				"set random MAC\n");
		mac = (uint32_t *)hw->mac.addr;
		*mac = (uint32_t)rte_rand();
		first_byte = (char *)mac;
		*first_byte &= 0xfe;	/* clear multicast bit */
		*first_byte |= 0x02;	/* set local assignment bit (IEEE802) */

		enetc_port_wr(enetc_hw, ENETC_PSIPMAR0(0), *mac);
		mac++;
		*mac = (uint16_t)rte_rand();
		enetc_port_wr(enetc_hw, ENETC_PSIPMAR1(0), *mac);
		print_ethaddr("New address: ",
			      (const struct rte_ether_addr *)hw->mac.addr);
	}

	return 0;
}

static int
enetc_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		    struct rte_eth_dev_info *dev_info)
{
	PMD_INIT_FUNC_TRACE();
	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = MAX_BD_COUNT,
		.nb_min = MIN_BD_COUNT,
		.nb_align = BD_ALIGN,
	};
	dev_info->max_rx_queues = MAX_RX_RINGS;
	dev_info->max_tx_queues = MAX_TX_RINGS;
	dev_info->max_rx_pktlen = ENETC_MAC_MAXFRM_SIZE;
	dev_info->rx_offload_capa =
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_UDP_CKSUM |
		 DEV_RX_OFFLOAD_TCP_CKSUM |
		 DEV_RX_OFFLOAD_KEEP_CRC |
		 DEV_RX_OFFLOAD_JUMBO_FRAME);

	return 0;
}

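/*
 * Allocate the software bookkeeping array and the hardware BD ring for
 * a Tx queue; both are sized by the requested descriptor count.
 */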
static int
enetc_alloc_txbdr(struct enetc_bdr *txr, uint16_t nb_desc)
{
	int size;

	size = nb_desc * sizeof(struct enetc_swbd);
	txr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_desc * sizeof(struct enetc_tx_bd);
	txr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (txr->bd_base == NULL) {
		rte_free(txr->q_swbd);
		txr->q_swbd = NULL;
		return -ENOMEM;
	}

	txr->bd_count = nb_desc;
	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void
enetc_free_bdr(struct enetc_bdr *rxr)
{
	rte_free(rxr->q_swbd);
	rte_free(rxr->bd_base);
	rxr->q_swbd = NULL;
	rxr->bd_base = NULL;
}

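/*
 * Program a Tx BD ring into the hardware: write the IOVA of the ring
 * base and the ring length, reset the consumer indexes, and cache the
 * index register addresses used in the fast path.
 */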
static void
enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)tx_ring->bd_base);
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	enetc_txbdr_wr(hw, idx, ENETC_TBCIR, 0);
	enetc_txbdr_wr(hw, idx, ENETC_TBCISR, 0);
	tx_ring->tcir = (void *)((size_t)hw->reg +
			ENETC_BDR(TX, idx, ENETC_TBCIR));
	tx_ring->tcisr = (void *)((size_t)hw->reg +
			 ENETC_BDR(TX, idx, ENETC_TBCISR));
}

static int
enetc_tx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t queue_idx,
		     uint16_t nb_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_txconf *tx_conf)
{
	int err = 0;
	struct enetc_bdr *tx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(data->dev_private);

	PMD_INIT_FUNC_TRACE();
	if (nb_desc > MAX_BD_COUNT)
		return -EINVAL;

	tx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (tx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate TX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_txbdr(tx_ring, nb_desc);
	if (err)
		goto fail;

	tx_ring->index = queue_idx;
	tx_ring->ndev = dev;
	enetc_setup_txbdr(&priv->hw.hw, tx_ring);
	data->tx_queues[queue_idx] = tx_ring;

	if (!tx_conf->tx_deferred_start) {
		/* enable ring */
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index,
			       ENETC_TBMR, ENETC_TBMR_EN);
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->tx_queue_state[tx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
fail:
	rte_free(tx_ring);

	return err;
}

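/*
 * Release a Tx queue: disable the BD ring in hardware, free any mbufs
 * still held by the software ring, then free the ring memory itself.
 */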
static void
enetc_tx_queue_release(void *txq)
{
	if (txq == NULL)
		return;

	struct enetc_bdr *tx_ring = (struct enetc_bdr *)txq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(tx_ring->ndev->data->dev_private);
	struct enetc_hw *hw;
	struct enetc_swbd *tx_swbd;
	int i;
	uint32_t val;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_txbdr_rd(hw, tx_ring->index, ENETC_TBMR);
	val &= (~ENETC_TBMR_EN);
	enetc_txbdr_wr(hw, tx_ring->index, ENETC_TBMR, val);

	/* clean the ring */
	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->q_swbd[i];
	while (tx_swbd->buffer_addr != NULL) {
		rte_pktmbuf_free(tx_swbd->buffer_addr);
		tx_swbd->buffer_addr = NULL;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = &tx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(tx_ring);
	rte_free(tx_ring);
}

static int
enetc_alloc_rxbdr(struct enetc_bdr *rxr,
		  uint16_t nb_rx_desc)
{
	int size;

	size = nb_rx_desc * sizeof(struct enetc_swbd);
	rxr->q_swbd = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->q_swbd == NULL)
		return -ENOMEM;

	size = nb_rx_desc * sizeof(union enetc_rx_bd);
	rxr->bd_base = rte_malloc(NULL, size, ENETC_BD_RING_ALIGN);
	if (rxr->bd_base == NULL) {
		rte_free(rxr->q_swbd);
		rxr->q_swbd = NULL;
		return -ENOMEM;
	}

	rxr->bd_count = nb_rx_desc;
	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;

	return 0;
}

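/*
 * Program an Rx BD ring into the hardware and attach its mempool:
 * write the ring IOVA and length, pre-fill the ring with mbufs, and
 * set the per-buffer size derived from the mempool data room.
 */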
static void
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring,
		  struct rte_mempool *mb_pool)
{
	int idx = rx_ring->index;
	uint16_t buf_size;
	phys_addr_t bd_address;

	bd_address = (phys_addr_t)
		     rte_mem_virt2iova((const void *)rx_ring->bd_base);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits((uint64_t)bd_address));
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	rx_ring->mb_pool = mb_pool;
	rx_ring->rcir = (void *)((size_t)hw->reg +
			ENETC_BDR(RX, idx, ENETC_RBCIR));
	enetc_refill_rx_ring(rx_ring, (enetc_bd_unused(rx_ring)));
	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rx_ring->mb_pool) -
		   RTE_PKTMBUF_HEADROOM);
	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, buf_size);
	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
}

static int
enetc_rx_queue_setup(struct rte_eth_dev *dev,
		     uint16_t rx_queue_id,
		     uint16_t nb_rx_desc,
		     unsigned int socket_id __rte_unused,
		     const struct rte_eth_rxconf *rx_conf,
		     struct rte_mempool *mb_pool)
{
	int err = 0;
	struct enetc_bdr *rx_ring;
	struct rte_eth_dev_data *data = dev->data;
	struct enetc_eth_adapter *adapter =
			ENETC_DEV_PRIVATE(data->dev_private);
	uint64_t rx_offloads = data->dev_conf.rxmode.offloads;

	PMD_INIT_FUNC_TRACE();
	if (nb_rx_desc > MAX_BD_COUNT)
		return -EINVAL;

	rx_ring = rte_zmalloc(NULL, sizeof(struct enetc_bdr), 0);
	if (rx_ring == NULL) {
		ENETC_PMD_ERR("Failed to allocate RX ring memory");
		return -ENOMEM;
	}

	err = enetc_alloc_rxbdr(rx_ring, nb_rx_desc);
	if (err)
		goto fail;

	rx_ring->index = rx_queue_id;
	rx_ring->ndev = dev;
	enetc_setup_rxbdr(&adapter->hw.hw, rx_ring, mb_pool);
	data->rx_queues[rx_queue_id] = rx_ring;

	if (!rx_conf->rx_deferred_start) {
		/* enable ring */
		enetc_rxbdr_wr(&adapter->hw.hw, rx_ring->index, ENETC_RBMR,
			       ENETC_RBMR_EN);
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		dev->data->rx_queue_state[rx_ring->index] =
			       RTE_ETH_QUEUE_STATE_STOPPED;
	}

	rx_ring->crc_len = (uint8_t)((rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
				     RTE_ETHER_CRC_LEN : 0);

	return 0;
fail:
	rte_free(rx_ring);

	return err;
}

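/*
 * Release an Rx queue: disable the BD ring, free the mbufs between
 * next_to_clean and next_to_use, then free the ring memory.
 */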
static void
enetc_rx_queue_release(void *rxq)
{
	if (rxq == NULL)
		return;

	struct enetc_bdr *rx_ring = (struct enetc_bdr *)rxq;
	struct enetc_eth_hw *eth_hw =
		ENETC_DEV_PRIVATE_TO_HW(rx_ring->ndev->data->dev_private);
	struct enetc_swbd *q_swbd;
	struct enetc_hw *hw;
	uint32_t val;
	int i;

	/* Disable the ring */
	hw = &eth_hw->hw;
	val = enetc_rxbdr_rd(hw, rx_ring->index, ENETC_RBMR);
	val &= (~ENETC_RBMR_EN);
	enetc_rxbdr_wr(hw, rx_ring->index, ENETC_RBMR, val);

	/* Clean the ring */
	i = rx_ring->next_to_clean;
	q_swbd = &rx_ring->q_swbd[i];
	while (i != rx_ring->next_to_use) {
		rte_pktmbuf_free(q_swbd->buffer_addr);
		q_swbd->buffer_addr = NULL;
		q_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			q_swbd = &rx_ring->q_swbd[i];
		}
	}

	enetc_free_bdr(rx_ring);
	rte_free(rx_ring);
}

static int
enetc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	/*
	 * Total received packets, bad + good; to count only good
	 * received/transmitted packets, use the ENETC_PM0_RFRM and
	 * ENETC_PM0_TFRM registers instead.
	 */
	stats->ipackets = enetc_port_rd(enetc_hw, ENETC_PM0_RPKT);
	stats->opackets = enetc_port_rd(enetc_hw, ENETC_PM0_TPKT);
	stats->ibytes = enetc_port_rd(enetc_hw, ENETC_PM0_REOCT);
	stats->obytes = enetc_port_rd(enetc_hw, ENETC_PM0_TEOCT);
	/*
	 * Dropped + truncated packets; use ENETC_PM0_RDRNTP to exclude
	 * truncated packets.
	 */
	stats->imissed = enetc_port_rd(enetc_hw, ENETC_PM0_RDRP);
	stats->ierrors = enetc_port_rd(enetc_hw, ENETC_PM0_RERR);
	stats->oerrors = enetc_port_rd(enetc_hw, ENETC_PM0_TERR);

	return 0;
}

static int
enetc_stats_reset(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;

	enetc_port_wr(enetc_hw, ENETC_PM0_STAT_CONFIG, ENETC_CLEAR_STATS);

	return 0;
}

static void
enetc_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	enetc_dev_stop(dev);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		enetc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		enetc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static int
enetc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable promiscuous mode */
	psipmr |= ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	/* Setting to disable promiscuous mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);
	psipmr &= (~ENETC_PSIPMR_SET_UP(0));

	if (dev->data->all_multicast == 0)
		psipmr &= (~ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR);

	/* Setting to enable allmulticast mode for SI0 */
	psipmr |= ENETC_PSIPMR_SET_MP(0);

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

static int
enetc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t psipmr = 0;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	/* Setting to disable all multicast mode for SI0 */
	psipmr = enetc_port_rd(enetc_hw, ENETC_PSIPMR) &
			       ~(ENETC_PSIPMR_SET_MP(0));

	enetc_port_wr(enetc_hw, ENETC_PSIPMR, psipmr);

	return 0;
}

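/*
 * Update the MTU: validate the resulting frame size against the MAC
 * limits and the configured Rx buffer size, toggle the jumbo-frame
 * offload flag accordingly, and program the new maximum frame length.
 */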
static int
enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

	/* Check that the MTU is within the allowed range */
	if (mtu < ENETC_MAC_MINFRM_SIZE || frame_size > ENETC_MAC_MAXFRM_SIZE)
		return -EINVAL;

	/*
	 * Refuse an MTU that requires scattered-packet support when that
	 * feature has not been enabled beforehand.
	 */
	if (dev->data->min_rx_buf_size &&
	    !dev->data->scattered_rx && frame_size >
	    dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		ENETC_PMD_ERR("SG not enabled, will not fit in one buffer");
		return -EINVAL;
	}
	if (frame_size > RTE_ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
	enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);

	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	/* Setting the MTU */
	enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
		      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));

	return 0;
}

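/*
 * Apply the device configuration: program the maximum frame size for
 * jumbo frames, CRC stripping, and Rx L3/L4 checksum validation based
 * on the requested Rx offloads.
 */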
static int
enetc_dev_configure(struct rte_eth_dev *dev)
{
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct enetc_hw *enetc_hw = &hw->hw;
	struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
	uint64_t rx_offloads = eth_conf->rxmode.offloads;
	uint32_t checksum = L3_CKSUM | L4_CKSUM;

	PMD_INIT_FUNC_TRACE();

	if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		uint32_t max_len;

		max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

		enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
			      ENETC_SET_MAXFRM(max_len));
		enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
			      ENETC_MAC_MAXFRM_SIZE);
		enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
			      2 * ENETC_MAC_MAXFRM_SIZE);
		dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
			RTE_ETHER_CRC_LEN;
	}

	if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		int config;

		config = enetc_port_rd(enetc_hw, ENETC_PM0_CMD_CFG);
		config |= ENETC_PM0_CRC;
		enetc_port_wr(enetc_hw, ENETC_PM0_CMD_CFG, config);
	}

	if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		checksum &= ~L3_CKSUM;

	if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM))
		checksum &= ~L4_CKSUM;

	enetc_port_wr(enetc_hw, ENETC_PAR_PORT_CFG, checksum);

	return 0;
}

static int
enetc_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data | ENETC_RBMR_EN;
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *rx_ring;
	uint32_t rx_data;

	rx_ring = dev->data->rx_queues[qidx];
	if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		rx_data = enetc_rxbdr_rd(&priv->hw.hw, rx_ring->index,
					 ENETC_RBMR);
		rx_data = rx_data & (~ENETC_RBMR_EN);
		enetc_rxbdr_wr(&priv->hw.hw, rx_ring->index, ENETC_RBMR,
			       rx_data);
		dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

static int
enetc_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data | ENETC_TBMR_EN;
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED;
	}

	return 0;
}

static int
enetc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
{
	struct enetc_eth_adapter *priv =
			ENETC_DEV_PRIVATE(dev->data->dev_private);
	struct enetc_bdr *tx_ring;
	uint32_t tx_data;

	tx_ring = dev->data->tx_queues[qidx];
	if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) {
		tx_data = enetc_txbdr_rd(&priv->hw.hw, tx_ring->index,
					 ENETC_TBMR);
		tx_data = tx_data & (~ENETC_TBMR_EN);
		enetc_txbdr_wr(&priv->hw.hw, tx_ring->index, ENETC_TBMR,
			       tx_data);
		dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	return 0;
}

/*
 * The set of PCI devices this driver supports
 */
static const struct rte_pci_id pci_id_enetc_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* Features supported by this driver */
static const struct eth_dev_ops enetc_ops = {
	.dev_configure        = enetc_dev_configure,
	.dev_start            = enetc_dev_start,
	.dev_stop             = enetc_dev_stop,
	.dev_close            = enetc_dev_close,
	.link_update          = enetc_link_update,
	.stats_get            = enetc_stats_get,
	.stats_reset          = enetc_stats_reset,
	.promiscuous_enable   = enetc_promiscuous_enable,
	.promiscuous_disable  = enetc_promiscuous_disable,
	.allmulticast_enable  = enetc_allmulticast_enable,
	.allmulticast_disable = enetc_allmulticast_disable,
	.dev_infos_get        = enetc_dev_infos_get,
	.mtu_set              = enetc_mtu_set,
	.rx_queue_setup       = enetc_rx_queue_setup,
	.rx_queue_start       = enetc_rx_queue_start,
	.rx_queue_stop        = enetc_rx_queue_stop,
	.rx_queue_release     = enetc_rx_queue_release,
	.tx_queue_setup       = enetc_tx_queue_setup,
	.tx_queue_start       = enetc_tx_queue_start,
	.tx_queue_stop        = enetc_tx_queue_stop,
	.tx_queue_release     = enetc_tx_queue_release,
	.dev_supported_ptypes_get = enetc_supported_ptypes_get,
};

/**
 * Initialisation of the enetc device
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static int
enetc_dev_init(struct rte_eth_dev *eth_dev)
{
	int error = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct enetc_eth_hw *hw =
		ENETC_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();
	eth_dev->dev_ops = &enetc_ops;
	eth_dev->rx_pkt_burst = &enetc_recv_pkts;
	eth_dev->tx_pkt_burst = &enetc_xmit_pkts;

	/* Retrieving and storing the HW base address of device */
	hw->hw.reg = (void *)pci_dev->mem_resource[0].addr;
	hw->device_id = pci_dev->id.device_id;

	error = enetc_hardware_init(hw);
	if (error != 0) {
		ENETC_PMD_ERR("Hardware initialization failed");
		return error;
	}

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("enetc_eth",
					RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		ENETC_PMD_ERR("Failed to allocate %d bytes needed to "
			      "store MAC addresses",
			      RTE_ETHER_ADDR_LEN * 1);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&eth_dev->data->mac_addrs[0]);

	/* Set MTU */
	enetc_port_wr(&hw->hw, ENETC_PM0_MAXFRM,
		      ENETC_SET_MAXFRM(RTE_ETHER_MAX_LEN));
	eth_dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_populate();

	ENETC_PMD_DEBUG("port_id %d vendorID=0x%x deviceID=0x%x",
			eth_dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);
	return 0;
}

static int
enetc_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_iova_mode() == RTE_IOVA_PA)
		dpaax_iova_table_depopulate();

	return 0;
}

static int
enetc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct enetc_eth_adapter),
					     enetc_dev_init);
}

static int
enetc_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, enetc_dev_uninit);
}

static struct rte_pci_driver rte_enetc_pmd = {
	.id_table = pci_id_enetc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = enetc_pci_probe,
	.remove = enetc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enetc, rte_enetc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enetc, pci_id_enetc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enetc, "* vfio-pci");

RTE_INIT(enetc_pmd_init_log)
{
	enetc_logtype_pmd = rte_log_register("pmd.net.enetc");
	if (enetc_logtype_pmd >= 0)
		rte_log_set_level(enetc_logtype_pmd, RTE_LOG_NOTICE);
}