dpdk.git: drivers/net/axgbe/axgbe_ethdev.c
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int  axgbe_dev_configure(struct rte_eth_dev *dev);
static int  axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
                                 int wait_to_complete);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
                               struct rte_eth_stats *stats);
static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void axgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID       0x1022
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
        {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
        {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
        { .vendor_id = 0, },
};

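/*
 * Per-version driver data: capability and workaround flags that differ
 * between the V2A and V2B devices (notably their maximum FIFO sizes).
 * The matching entry is selected by PCI device ID at init time.
 */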
static struct axgbe_version_data axgbe_v2a = {
        .init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
        .xpcs_access                    = AXGBE_XPCS_ACCESS_V2,
        .mmc_64bit                      = 1,
        .tx_max_fifo_size               = 229376,
        .rx_max_fifo_size               = 229376,
        .tx_tstamp_workaround           = 1,
        .ecc_support                    = 1,
        .i2c_support                    = 1,
        .an_cdr_workaround              = 1,
};

static struct axgbe_version_data axgbe_v2b = {
        .init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
        .xpcs_access                    = AXGBE_XPCS_ACCESS_V2,
        .mmc_64bit                      = 1,
        .tx_max_fifo_size               = 65536,
        .rx_max_fifo_size               = 65536,
        .tx_tstamp_workaround           = 1,
        .ecc_support                    = 1,
        .i2c_support                    = 1,
        .an_cdr_workaround              = 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = AXGBE_MAX_RING_DESC,
        .nb_min = AXGBE_MIN_RING_DESC,
        .nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = AXGBE_MAX_RING_DESC,
        .nb_min = AXGBE_MIN_RING_DESC,
        .nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
        .dev_configure        = axgbe_dev_configure,
        .dev_start            = axgbe_dev_start,
        .dev_stop             = axgbe_dev_stop,
        .dev_close            = axgbe_dev_close,
        .promiscuous_enable   = axgbe_dev_promiscuous_enable,
        .promiscuous_disable  = axgbe_dev_promiscuous_disable,
        .allmulticast_enable  = axgbe_dev_allmulticast_enable,
        .allmulticast_disable = axgbe_dev_allmulticast_disable,
        .link_update          = axgbe_dev_link_update,
        .stats_get            = axgbe_dev_stats_get,
        .stats_reset          = axgbe_dev_stats_reset,
        .dev_infos_get        = axgbe_dev_info_get,
        .rx_queue_setup       = axgbe_dev_rx_queue_setup,
        .rx_queue_release     = axgbe_dev_rx_queue_release,
        .tx_queue_setup       = axgbe_dev_tx_queue_setup,
        .tx_queue_release     = axgbe_dev_tx_queue_release,
};

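/* Mark the link state and speed as unknown, then request a PHY reset. */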
static int axgbe_phy_reset(struct axgbe_port *pdata)
{
        pdata->phy_link = -1;
        pdata->phy_speed = SPEED_UNKNOWN;
        return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by the NIC for handling specific interrupts.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct axgbe_port *pdata = dev->data->dev_private;
        unsigned int dma_isr, dma_ch_isr;

        pdata->phy_if.an_isr(pdata);
        /* DMA related interrupts */
        dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
        if (dma_isr) {
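                /*
                 * Bit 0 of DMA_ISR flags an interrupt on DMA channel 0:
                 * read its channel status register and write the value back
                 * to acknowledge (clear) the pending events.
                 */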
                if (dma_isr & 1) {
                        dma_ch_isr =
                                AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
                                                  pdata->rx_queues[0],
                                                  DMA_CH_SR);
                        AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
                                           pdata->rx_queues[0],
                                           DMA_CH_SR, dma_ch_isr);
                }
        }
        /* Enable interrupts since disabled after generation */
        rte_intr_enable(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
        struct axgbe_port *pdata = dev->data->dev_private;
        /* Checksum offload to hardware */
        pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
                                DEV_RX_OFFLOAD_CHECKSUM;
        return 0;
}

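/*
 * Map the requested Rx multi-queue mode onto the RSS flag; only
 * ETH_MQ_RX_RSS and ETH_MQ_RX_NONE are supported.
 */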
static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
        struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;

        if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
                pdata->rss_enable = 1;
        else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
                pdata->rss_enable = 0;
        else
                return -1;
        return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
        int ret;

        /* Multiqueue RSS */
        ret = axgbe_dev_rx_mq_config(dev);
        if (ret) {
                PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
                return ret;
        }
        ret = axgbe_phy_reset(pdata);
        if (ret) {
                PMD_DRV_LOG(ERR, "phy reset failed\n");
                return ret;
        }
        ret = pdata->hw_if.init(pdata);
        if (ret) {
                PMD_DRV_LOG(ERR, "dev_init failed\n");
                return ret;
        }

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(&pdata->pci_dev->intr_handle);

        /* phy start */
        pdata->phy_if.phy_start(pdata);
        axgbe_dev_enable_tx(dev);
        axgbe_dev_enable_rx(dev);

        axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
        axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
        return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = dev->data->dev_private;

        rte_intr_disable(&pdata->pci_dev->intr_handle);

        if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
                return;

        axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
        axgbe_dev_disable_tx(dev);
        axgbe_dev_disable_rx(dev);

        pdata->phy_if.phy_stop(pdata);
        pdata->hw_if.exit(pdata);
        memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
        axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
        axgbe_dev_clear_queues(dev);
}

static void
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = dev->data->dev_private;

        AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
}

static void
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = dev->data->dev_private;

        AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
}

static void
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = dev->data->dev_private;

        if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
                return;
        AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
}

static void
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = dev->data->dev_private;

        if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
                return;
        AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
                      int wait_to_complete __rte_unused)
{
        struct axgbe_port *pdata = dev->data->dev_private;
        struct rte_eth_link link;
        int ret = 0;

        PMD_INIT_FUNC_TRACE();
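        /* Wait briefly so the PHY status has time to settle before it is polled. */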
        rte_delay_ms(800);

        pdata->phy_if.phy_status(pdata);

        memset(&link, 0, sizeof(struct rte_eth_link));
        link.link_duplex = pdata->phy.duplex;
        link.link_status = pdata->phy_link;
        link.link_speed = pdata->phy_speed;
        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                              ETH_LINK_SPEED_FIXED);
        ret = rte_eth_linkstatus_set(dev, &link);
        if (ret == -1)
                PMD_DRV_LOG(ERR, "No change in link status\n");

        return ret;
}

static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
                    struct rte_eth_stats *stats)
{
        struct axgbe_rx_queue *rxq;
        struct axgbe_tx_queue *txq;
        unsigned int i;

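        /* Sum the per-queue packet and byte counters into the device totals. */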
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = rxq->pkts;
                stats->ipackets += rxq->pkts;
                stats->q_ibytes[i] = rxq->bytes;
                stats->ibytes += rxq->bytes;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                stats->q_opackets[i] = txq->pkts;
                stats->opackets += txq->pkts;
                stats->q_obytes[i] = txq->bytes;
                stats->obytes += txq->bytes;
        }

        return 0;
}

static void
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct axgbe_rx_queue *rxq;
        struct axgbe_tx_queue *txq;
        unsigned int i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                rxq->pkts = 0;
                rxq->bytes = 0;
                rxq->errors = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                txq->pkts = 0;
                txq->bytes = 0;
                txq->errors = 0;
        }
}

static void
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct axgbe_port *pdata = dev->data->dev_private;

        dev_info->max_rx_queues = pdata->rx_ring_count;
        dev_info->max_tx_queues = pdata->tx_ring_count;
        dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
        dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
        dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
        dev_info->speed_capa = ETH_LINK_SPEED_10G;

        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM  |
                DEV_RX_OFFLOAD_KEEP_CRC;

        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM;

        if (pdata->hw_feat.rss) {
                dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
                dev_info->reta_size = pdata->hw_feat.hash_table_size;
                dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
        }

        dev_info->rx_desc_lim = rx_desc_lim;
        dev_info->tx_desc_lim = tx_desc_lim;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = AXGBE_RX_FREE_THRESH,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_free_thresh = AXGBE_TX_FREE_THRESH,
        };
}

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
        unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
        struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

        mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
        mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
        mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

        memset(hw_feat, 0, sizeof(*hw_feat));

        hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

        /* Hardware feature register 0 */
        hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
        hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
        hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
        hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
        hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
        hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
        hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
        hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
        hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
        hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
        hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
        hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
                                              ADDMACADRSEL);
        hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
        hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

        /* Hardware feature register 1 */
        hw_feat->rx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                RXFIFOSIZE);
        hw_feat->tx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                TXFIFOSIZE);
        hw_feat->adv_ts_hi     = AXGMAC_GET_BITS(mac_hfr1,
                                                 MAC_HWF1R, ADVTHWORD);
        hw_feat->dma_width     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
        hw_feat->dcb           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
        hw_feat->sph           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
        hw_feat->tso           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
        hw_feat->dma_debug     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
        hw_feat->rss           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
        hw_feat->tc_cnt        = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
        hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  HASHTBLSZ);
        hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  L3L4FNUM);

        /* Hardware feature register 2 */
        hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
        hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
        hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
        hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
        hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
        hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
                                                AUXSNAPNUM);

        /* Translate the Hash Table size into actual number */
        switch (hw_feat->hash_table_size) {
        case 0:
                break;
        case 1:
                hw_feat->hash_table_size = 64;
                break;
        case 2:
                hw_feat->hash_table_size = 128;
                break;
        case 3:
                hw_feat->hash_table_size = 256;
                break;
        }

        /* Translate the address width setting into actual number */
        switch (hw_feat->dma_width) {
        case 0:
                hw_feat->dma_width = 32;
                break;
        case 1:
                hw_feat->dma_width = 40;
                break;
        case 2:
                hw_feat->dma_width = 48;
                break;
        default:
                hw_feat->dma_width = 32;
        }

        /* The Queue, Channel and TC counts are zero based so increment them
         * to get the actual number
         */
        hw_feat->rx_q_cnt++;
        hw_feat->tx_q_cnt++;
        hw_feat->rx_ch_cnt++;
        hw_feat->tx_ch_cnt++;
        hw_feat->tc_cnt++;

        /* Translate the fifo sizes into actual numbers */
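        /* The register fields encode the size as a power of two: 1 << (n + 7). */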
        hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
        hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
        axgbe_init_function_ptrs_dev(&pdata->hw_if);
        axgbe_init_function_ptrs_phy(&pdata->phy_if);
        axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
        pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
        /* Set all the function pointers */
        axgbe_init_all_fptrs(pdata);

        /* Populate the hardware features */
        axgbe_get_all_hw_features(pdata);

        /* Set default max values if not provided */
        if (!pdata->tx_max_channel_count)
                pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
        if (!pdata->rx_max_channel_count)
                pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

        if (!pdata->tx_max_q_count)
                pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
        if (!pdata->rx_max_q_count)
                pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

        /* Calculate the number of Tx and Rx rings to be created
         *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
         *   the number of Tx queues to the number of Tx channels
         *   enabled
         *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
         *   number of Rx queues or maximum allowed
         */
        pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
                                     pdata->tx_max_channel_count);
        pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
                                     pdata->tx_max_q_count);

        pdata->tx_q_count = pdata->tx_ring_count;

        pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
                                     pdata->rx_max_channel_count);

        pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
                                  pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
        pdata->pblx8 = DMA_PBL_X8_ENABLE;
        pdata->tx_sf_mode = MTL_TSF_ENABLE;
        pdata->tx_threshold = MTL_TX_THRESHOLD_64;
        pdata->tx_pbl = DMA_PBL_32;
        pdata->tx_osp_mode = DMA_OSP_ENABLE;
        pdata->rx_sf_mode = MTL_RSF_ENABLE;
        pdata->rx_threshold = MTL_RX_THRESHOLD_64;
        pdata->rx_pbl = DMA_PBL_32;
        pdata->pause_autoneg = 1;
        pdata->tx_pause = 0;
        pdata->rx_pause = 0;
        pdata->phy_speed = SPEED_UNKNOWN;
        pdata->power_down = 0;
}

/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata;
        struct rte_pci_device *pci_dev;
        uint32_t reg, mac_lo, mac_hi;
        int ret;

        eth_dev->dev_ops = &axgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &axgbe_recv_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        pdata = (struct axgbe_port *)eth_dev->data->dev_private;
        /* initial state */
        axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
        axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
        pdata->eth_dev = eth_dev;

        pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
        pdata->pci_dev = pci_dev;

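        /*
         * Map the register blocks: the MAC, port property and I2C control
         * blocks share the AXGMAC BAR at fixed offsets, while the XPCS
         * registers live in a separate BAR.
         */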
        pdata->xgmac_regs =
                (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
        pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
                                     + AXGBE_MAC_PROP_OFFSET);
        pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
                                    + AXGBE_I2C_CTRL_OFFSET);
        pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

        /* Version specific driver data */
        if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
                pdata->vdata = &axgbe_v2a;
        else
                pdata->vdata = &axgbe_v2b;

        /* Configure the PCS indirect addressing support */
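        /*
         * The window offset field is scaled by 64 (hence the shift by 6) and
         * the size field encodes a power of two (1 << (n + 7)); the mask is
         * derived as size - 1.
         */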
        reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
        pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
        pdata->xpcs_window <<= 6;
        pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
        pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
        pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
        pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
        pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
        PMD_INIT_LOG(DEBUG,
                     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
                     pdata->xpcs_window_size, pdata->xpcs_window_mask);
        XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

        /* Retrieve the MAC address */
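        /*
         * The address is read from the port property registers: XP_MAC_ADDR_LO
         * holds bytes 0-3 and XP_MAC_ADDR_HI holds bytes 4-5.
         */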
        mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
        mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
        pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
        pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
        pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
        pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
        pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
        pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

        eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
                                               ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR,
                             "Failed to alloc %u bytes needed to store MAC addr tbl",
                             ETHER_ADDR_LEN);
                return -ENOMEM;
        }

        if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
                eth_random_addr(pdata->mac_addr.addr_bytes);

        /* Copy the permanent MAC address */
        ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

        /* Clock settings */
        pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
        pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

        /* Set the DMA coherency values */
        pdata->coherent = 1;
        pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
        pdata->arcache = AXGBE_DMA_OS_ARCACHE;
        pdata->awcache = AXGBE_DMA_OS_AWCACHE;

        /* Set the maximum channels and queues */
        reg = XP_IOREAD(pdata, XP_PROP_1);
        pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
        pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
        pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
        pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

        /* Set the hardware channel and queue counts */
        axgbe_set_counts(pdata);

        /* Set the maximum fifo amounts */
        reg = XP_IOREAD(pdata, XP_PROP_2);
        pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
        pdata->tx_max_fifo_size *= 16384;
        pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
                                          pdata->vdata->tx_max_fifo_size);
        pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
        pdata->rx_max_fifo_size *= 16384;
        pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
                                          pdata->vdata->rx_max_fifo_size);
        /* Issue software reset to DMA */
        ret = pdata->hw_if.exit(pdata);
        if (ret)
                PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

        /* Set default configuration data */
        axgbe_default_config(pdata);

        /* Set default max values if not provided */
        if (!pdata->tx_max_fifo_size)
                pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
        if (!pdata->rx_max_fifo_size)
                pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

        pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
        pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
        pthread_mutex_init(&pdata->xpcs_mutex, NULL);
        pthread_mutex_init(&pdata->i2c_mutex, NULL);
        pthread_mutex_init(&pdata->an_mutex, NULL);
        pthread_mutex_init(&pdata->phy_mutex, NULL);

        ret = pdata->phy_if.phy_init(pdata);
        if (ret) {
                rte_free(eth_dev->data->mac_addrs);
                return ret;
        }

        rte_intr_callback_register(&pci_dev->intr_handle,
                                   axgbe_dev_interrupt_handler,
                                   (void *)eth_dev);
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
        /* Free the MAC address table */
        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;
        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
        axgbe_dev_clear_queues(eth_dev);

        /* disable uio intr before callback unregister */
        rte_intr_disable(&pci_dev->intr_handle);
        rte_intr_callback_unregister(&pci_dev->intr_handle,
                                     axgbe_dev_interrupt_handler,
                                     (void *)eth_dev);

        return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
        .id_table = pci_id_axgbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_axgbe_pci_probe,
        .remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log)
{
        axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
        if (axgbe_logtype_init >= 0)
                rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
        axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
        if (axgbe_logtype_driver >= 0)
                rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}