net/axgbe: support 32-bit build mode
[dpdk.git] / drivers / net / axgbe / axgbe_ethdev.c
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int  axgbe_dev_configure(struct rte_eth_dev *dev);
static int  axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
                                 int wait_to_complete);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
                                struct rte_eth_stats *stats);
static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void axgbe_dev_info_get(struct rte_eth_dev *dev,
                               struct rte_eth_dev_info *dev_info);

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID       0x1022
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
        {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
        {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
        { .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
        .init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
        .xpcs_access                    = AXGBE_XPCS_ACCESS_V2,
        .mmc_64bit                      = 1,
        .tx_max_fifo_size               = 229376,
        .rx_max_fifo_size               = 229376,
        .tx_tstamp_workaround           = 1,
        .ecc_support                    = 1,
        .i2c_support                    = 1,
};

static struct axgbe_version_data axgbe_v2b = {
        .init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
        .xpcs_access                    = AXGBE_XPCS_ACCESS_V2,
        .mmc_64bit                      = 1,
        .tx_max_fifo_size               = 65536,
        .rx_max_fifo_size               = 65536,
        .tx_tstamp_workaround           = 1,
        .ecc_support                    = 1,
        .i2c_support                    = 1,
};
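
/*
 * The two version-data structs above differ only in the advertised
 * maximum FIFO sizes (224 KB for V2A vs. 64 KB for V2B); the v2 PHY
 * function pointers and feature flags are identical.
 */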

static const struct rte_eth_desc_lim rx_desc_lim = {
        .nb_max = AXGBE_MAX_RING_DESC,
        .nb_min = AXGBE_MIN_RING_DESC,
        .nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
        .nb_max = AXGBE_MAX_RING_DESC,
        .nb_min = AXGBE_MIN_RING_DESC,
        .nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
        .dev_configure        = axgbe_dev_configure,
        .dev_start            = axgbe_dev_start,
        .dev_stop             = axgbe_dev_stop,
        .dev_close            = axgbe_dev_close,
        .promiscuous_enable   = axgbe_dev_promiscuous_enable,
        .promiscuous_disable  = axgbe_dev_promiscuous_disable,
        .allmulticast_enable  = axgbe_dev_allmulticast_enable,
        .allmulticast_disable = axgbe_dev_allmulticast_disable,
        .link_update          = axgbe_dev_link_update,
        .stats_get            = axgbe_dev_stats_get,
        .stats_reset          = axgbe_dev_stats_reset,
        .dev_infos_get        = axgbe_dev_info_get,
        .rx_queue_setup       = axgbe_dev_rx_queue_setup,
        .rx_queue_release     = axgbe_dev_rx_queue_release,
        .tx_queue_setup       = axgbe_dev_tx_queue_setup,
        .tx_queue_release     = axgbe_dev_tx_queue_release,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
        pdata->phy_link = -1;
        pdata->phy_speed = SPEED_UNKNOWN;
        return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by the NIC for handling a
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
        struct axgbe_port *pdata = dev->data->dev_private;
        unsigned int dma_isr, dma_ch_isr;

        pdata->phy_if.an_isr(pdata);
        /* DMA related interrupts */
        dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
        if (dma_isr) {
                if (dma_isr & 1) {
                        dma_ch_isr =
                                AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
                                                  pdata->rx_queues[0],
                                                  DMA_CH_SR);
                        AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
                                           pdata->rx_queues[0],
                                           DMA_CH_SR, dma_ch_isr);
                }
        }
        /* Enable interrupts since they are disabled after generation */
        rte_intr_enable(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
        struct axgbe_port *pdata = dev->data->dev_private;

        /* Checksum offload to hardware */
        pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
                                DEV_RX_OFFLOAD_CHECKSUM;
        return 0;
}
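
/*
 * Illustrative sketch only, not part of the driver: an application would
 * request the checksum offload latched above through the standard ethdev
 * configure path, along the lines of
 *
 *     struct rte_eth_conf conf = { 0 };
 *
 *     conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * where port_id, nb_rxq and nb_txq are hypothetical application values.
 */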

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
        struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;

        if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
                pdata->rss_enable = 1;
        else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
                pdata->rss_enable = 0;
        else
                return -1;
        return 0;
}
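
/*
 * Only ETH_MQ_RX_RSS and ETH_MQ_RX_NONE are accepted; any other
 * multi-queue Rx mode causes device start to fail.
 */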

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
        int ret;

        /* Multiqueue RSS */
        ret = axgbe_dev_rx_mq_config(dev);
        if (ret) {
                PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
                return ret;
        }
        ret = axgbe_phy_reset(pdata);
        if (ret) {
                PMD_DRV_LOG(ERR, "phy reset failed\n");
                return ret;
        }
        ret = pdata->hw_if.init(pdata);
        if (ret) {
                PMD_DRV_LOG(ERR, "dev_init failed\n");
                return ret;
        }

        /* enable uio/vfio intr/eventfd mapping */
        rte_intr_enable(&pdata->pci_dev->intr_handle);

        /* phy start */
        pdata->phy_if.phy_start(pdata);
        axgbe_dev_enable_tx(dev);
        axgbe_dev_enable_rx(dev);

        axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
        axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
        return 0;
}
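
/*
 * Note: start() clears the STOPPED/DOWN bits set at init time and by
 * stop(), which is what lets stop() below return early when the port is
 * already stopped.
 */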

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = dev->data->dev_private;

        rte_intr_disable(&pdata->pci_dev->intr_handle);

        if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
                return;

        axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
        axgbe_dev_disable_tx(dev);
        axgbe_dev_disable_rx(dev);

        pdata->phy_if.phy_stop(pdata);
        pdata->hw_if.exit(pdata);
        memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
        axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
        axgbe_dev_clear_queues(dev);
}

static void
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = dev->data->dev_private;

        AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
}

static void
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = dev->data->dev_private;

        AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
}

static void
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = dev->data->dev_private;

        if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
                return;
        AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
}

static void
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata = dev->data->dev_private;

        if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
                return;
        AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
}

/* Returns 0 when the link status changed, -1 when it did not */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
                      int wait_to_complete __rte_unused)
{
        struct axgbe_port *pdata = dev->data->dev_private;
        struct rte_eth_link link;
        int ret = 0;

        PMD_INIT_FUNC_TRACE();
        rte_delay_ms(800);

        pdata->phy_if.phy_status(pdata);

        memset(&link, 0, sizeof(struct rte_eth_link));
        link.link_duplex = pdata->phy.duplex;
        link.link_status = pdata->phy_link;
        link.link_speed = pdata->phy_speed;
        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
                              ETH_LINK_SPEED_FIXED);
        ret = rte_eth_linkstatus_set(dev, &link);
        if (ret == -1)
                PMD_DRV_LOG(ERR, "No change in link status\n");

        return ret;
}

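/*
 * Basic stats are aggregated from per-queue counters maintained in
 * software; nothing is read back from the hardware MMC counters here.
 */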
static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
                    struct rte_eth_stats *stats)
{
        struct axgbe_rx_queue *rxq;
        struct axgbe_tx_queue *txq;
        unsigned int i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                stats->q_ipackets[i] = rxq->pkts;
                stats->ipackets += rxq->pkts;
                stats->q_ibytes[i] = rxq->bytes;
                stats->ibytes += rxq->bytes;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                stats->q_opackets[i] = txq->pkts;
                stats->opackets += txq->pkts;
                stats->q_obytes[i] = txq->bytes;
                stats->obytes += txq->bytes;
        }

        return 0;
}

static void
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
        struct axgbe_rx_queue *rxq;
        struct axgbe_tx_queue *txq;
        unsigned int i;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
                rxq->pkts = 0;
                rxq->bytes = 0;
                rxq->errors = 0;
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
                txq->pkts = 0;
                txq->bytes = 0;
                txq->errors = 0;
        }
}

static void
axgbe_dev_info_get(struct rte_eth_dev *dev,
                   struct rte_eth_dev_info *dev_info)
{
        struct axgbe_port *pdata = dev->data->dev_private;

        dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        dev_info->max_rx_queues = pdata->rx_ring_count;
        dev_info->max_tx_queues = pdata->tx_ring_count;
        dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
        dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
        dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
        dev_info->speed_capa = ETH_LINK_SPEED_10G;

        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM  |
                DEV_RX_OFFLOAD_TCP_CKSUM;

        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM;

        if (pdata->hw_feat.rss) {
                dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
                dev_info->reta_size = pdata->hw_feat.hash_table_size;
                dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
        }

        dev_info->rx_desc_lim = rx_desc_lim;
        dev_info->tx_desc_lim = tx_desc_lim;

        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = AXGBE_RX_FREE_THRESH,
        };

        dev_info->default_txconf = (struct rte_eth_txconf) {
                .tx_free_thresh = AXGBE_TX_FREE_THRESH,
                .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
                                ETH_TXQ_FLAGS_NOOFFLOADS,
        };
}

static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
        unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
        struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

        mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
        mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
        mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

        memset(hw_feat, 0, sizeof(*hw_feat));

        hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

        /* Hardware feature register 0 */
        hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
        hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
        hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
        hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
        hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
        hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
        hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
        hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
        hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
        hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
        hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
        hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
                                              ADDMACADRSEL);
        hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
        hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

        /* Hardware feature register 1 */
        hw_feat->rx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                RXFIFOSIZE);
        hw_feat->tx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                TXFIFOSIZE);
        hw_feat->adv_ts_hi     = AXGMAC_GET_BITS(mac_hfr1,
                                                 MAC_HWF1R, ADVTHWORD);
        hw_feat->dma_width     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
        hw_feat->dcb           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
        hw_feat->sph           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
        hw_feat->tso           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
        hw_feat->dma_debug     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
        hw_feat->rss           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
        hw_feat->tc_cnt        = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
        hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  HASHTBLSZ);
        hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  L3L4FNUM);

        /* Hardware feature register 2 */
        hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
        hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
        hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
        hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
        hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
        hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
                                                AUXSNAPNUM);

        /* Translate the Hash Table size into actual number */
        switch (hw_feat->hash_table_size) {
        case 0:
                break;
        case 1:
                hw_feat->hash_table_size = 64;
                break;
        case 2:
                hw_feat->hash_table_size = 128;
                break;
        case 3:
                hw_feat->hash_table_size = 256;
                break;
        }

        /* Translate the address width setting into actual number */
        switch (hw_feat->dma_width) {
        case 0:
                hw_feat->dma_width = 32;
                break;
        case 1:
                hw_feat->dma_width = 40;
                break;
        case 2:
                hw_feat->dma_width = 48;
                break;
        default:
                hw_feat->dma_width = 32;
        }

        /* The Queue, Channel and TC counts are zero based so increment them
         * to get the actual number
         */
        hw_feat->rx_q_cnt++;
        hw_feat->tx_q_cnt++;
        hw_feat->rx_ch_cnt++;
        hw_feat->tx_ch_cnt++;
        hw_feat->tc_cnt++;

        /* Translate the fifo sizes into actual numbers */
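        /* The fields encode log2(bytes) - 7, so an encoded value of 9,
         * for example, yields 1 << (9 + 7) = 64 KB of FIFO.
         */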
        hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
        hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
        axgbe_init_function_ptrs_dev(&pdata->hw_if);
        axgbe_init_function_ptrs_phy(&pdata->phy_if);
        axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
        pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
        /* Set all the function pointers */
        axgbe_init_all_fptrs(pdata);

        /* Populate the hardware features */
        axgbe_get_all_hw_features(pdata);

        /* Set default max values if not provided */
        if (!pdata->tx_max_channel_count)
                pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
        if (!pdata->rx_max_channel_count)
                pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

        if (!pdata->tx_max_q_count)
                pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
        if (!pdata->rx_max_q_count)
                pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

        /* Calculate the number of Tx and Rx rings to be created
         *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
         *   the number of Tx queues to the number of Tx channels
         *   enabled
         *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
         *   number of Rx queues or maximum allowed
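         *  For example (illustrative numbers only): hardware reporting
         *  eight Tx channels on a port whose properties cap it at four
         *  Tx queues yields four Tx rings.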
         */
        pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
                                     pdata->tx_max_channel_count);
        pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
                                     pdata->tx_max_q_count);

        pdata->tx_q_count = pdata->tx_ring_count;

        pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
                                     pdata->rx_max_channel_count);

        pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
                                  pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
        pdata->pblx8 = DMA_PBL_X8_ENABLE;
        pdata->tx_sf_mode = MTL_TSF_ENABLE;
        pdata->tx_threshold = MTL_TX_THRESHOLD_64;
        pdata->tx_pbl = DMA_PBL_32;
        pdata->tx_osp_mode = DMA_OSP_ENABLE;
        pdata->rx_sf_mode = MTL_RSF_ENABLE;
        pdata->rx_threshold = MTL_RX_THRESHOLD_64;
        pdata->rx_pbl = DMA_PBL_32;
        pdata->pause_autoneg = 1;
        pdata->tx_pause = 0;
        pdata->rx_pause = 0;
        pdata->phy_speed = SPEED_UNKNOWN;
        pdata->power_down = 0;
}

/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
        PMD_INIT_FUNC_TRACE();
        struct axgbe_port *pdata;
        struct rte_pci_device *pci_dev;
        uint32_t reg, mac_lo, mac_hi;
        int ret;

        eth_dev->dev_ops = &axgbe_eth_dev_ops;
        eth_dev->rx_pkt_burst = &axgbe_recv_pkts;

        /*
         * For secondary processes, we don't initialise any further as primary
         * has already done this work.
         */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        pdata = (struct axgbe_port *)eth_dev->data->dev_private;
        /* initial state */
        axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
        axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
        pdata->eth_dev = eth_dev;

        pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
        pdata->pci_dev = pci_dev;

        pdata->xgmac_regs =
                (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
        pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
                                     + AXGBE_MAC_PROP_OFFSET);
        pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
                                    + AXGBE_I2C_CTRL_OFFSET);
        pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

        /* version specific driver data */
        if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
                pdata->vdata = &axgbe_v2a;
        else
                pdata->vdata = &axgbe_v2b;

        /* Configure the PCS indirect addressing support */
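        /* The window definition register packs a window offset (stored in
         * units of 64 bytes, hence the shift by 6 below) and a size field
         * encoding log2(bytes) - 7; e.g. a SIZE field of 2 gives a
         * 512-byte window and a mask of 0x1ff.
         */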
        reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
        pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
        pdata->xpcs_window <<= 6;
        pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
        pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
        pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
        pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
        pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
        PMD_INIT_LOG(DEBUG,
                     "xpcs window: %x, size: %x, mask: %x", pdata->xpcs_window,
                     pdata->xpcs_window_size, pdata->xpcs_window_mask);
        XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

        /* Retrieve the MAC address */
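        /* The 48-bit address is spread across two 32-bit port property
         * registers, least significant byte first: bytes 0-3 come from
         * XP_MAC_ADDR_LO and bytes 4-5 from the low half of
         * XP_MAC_ADDR_HI.
         */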
        mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
        mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
        pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
        pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
        pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
        pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
        pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
        pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

        eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
                                               ETHER_ADDR_LEN, 0);
        if (!eth_dev->data->mac_addrs) {
                PMD_INIT_LOG(ERR,
                             "Failed to alloc %u bytes needed to store MAC addr tbl",
                             ETHER_ADDR_LEN);
                return -ENOMEM;
        }

        if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
                eth_random_addr(pdata->mac_addr.addr_bytes);

        /* Copy the permanent MAC address */
        ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

        /* Clock settings */
        pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
        pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

        /* Set the DMA coherency values */
        pdata->coherent = 1;
        pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
        pdata->arcache = AXGBE_DMA_OS_ARCACHE;
        pdata->awcache = AXGBE_DMA_OS_AWCACHE;

        /* Set the maximum channels and queues */
        reg = XP_IOREAD(pdata, XP_PROP_1);
        pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
        pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
        pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
        pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

        /* Set the hardware channel and queue counts */
        axgbe_set_counts(pdata);

        /* Set the maximum fifo amounts */
        reg = XP_IOREAD(pdata, XP_PROP_2);
        pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
        pdata->tx_max_fifo_size *= 16384;
        pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
                                          pdata->vdata->tx_max_fifo_size);
        pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
        pdata->rx_max_fifo_size *= 16384;
        pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
                                          pdata->vdata->rx_max_fifo_size);

        /* Issue software reset to DMA */
        ret = pdata->hw_if.exit(pdata);
        if (ret)
                PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

        /* Set default configuration data */
        axgbe_default_config(pdata);

        /* Set default max values if not provided */
        if (!pdata->tx_max_fifo_size)
                pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
        if (!pdata->rx_max_fifo_size)
                pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

        pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
        pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
        pthread_mutex_init(&pdata->xpcs_mutex, NULL);
        pthread_mutex_init(&pdata->i2c_mutex, NULL);
        pthread_mutex_init(&pdata->an_mutex, NULL);
        pthread_mutex_init(&pdata->phy_mutex, NULL);

        ret = pdata->phy_if.phy_init(pdata);
        if (ret) {
                rte_free(eth_dev->data->mac_addrs);
                return ret;
        }

        rte_intr_callback_register(&pci_dev->intr_handle,
                                   axgbe_dev_interrupt_handler,
                                   (void *)eth_dev);
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
                     eth_dev->data->port_id, pci_dev->id.vendor_id,
                     pci_dev->id.device_id);

        return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
        struct rte_pci_device *pci_dev;

        PMD_INIT_FUNC_TRACE();

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
        /* Free the MAC address table */
        rte_free(eth_dev->data->mac_addrs);
        eth_dev->data->mac_addrs = NULL;
        eth_dev->dev_ops = NULL;
        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;
        axgbe_dev_clear_queues(eth_dev);

        /* disable uio intr before callback unregister */
        rte_intr_disable(&pci_dev->intr_handle);
        rte_intr_callback_unregister(&pci_dev->intr_handle,
                                     axgbe_dev_interrupt_handler,
                                     (void *)eth_dev);

        return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
        struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_probe(pci_dev,
                sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
        return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
        .id_table = pci_id_axgbe_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
        .probe = eth_axgbe_pci_probe,
        .remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
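
/*
 * The dependency string above means the device must be bound to one of
 * igb_uio, uio_pci_generic or vfio-pci before this PMD can take it over.
 */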

RTE_INIT(axgbe_init_log);
static void
axgbe_init_log(void)
{
        axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
        if (axgbe_logtype_init >= 0)
                rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
        axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
        if (axgbe_logtype_driver >= 0)
                rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}