/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID	0x1022
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.link_update          = axgbe_dev_link_update,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
};

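/* Drop the cached link state and reset the PHY through the hooked PHY ops */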
static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	if (dma_isr) {
		if (dma_isr & 1) {
			/* Read and clear the status of DMA channel 0 */
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

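/* Only RSS, or no multiqueue mode at all, is accepted on the Rx side */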
static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

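/* Per-queue counters are accumulated in software by the Rx/Tx burst paths */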
static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM  |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM  |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}

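/* Decode the MAC_HWF0R/1R/2R capability registers into pdata->hw_feat */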
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					       ADDMACADRSEL);
	hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi    = AXGMAC_GET_BITS(mac_hfr1,
						MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width    = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb          = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph          = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso          = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug    = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss          = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt       = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

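/* Install the generic dev/phy/i2c ops, then let the version-specific
 * data override the PHY implementation.
 */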
static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

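/* Baseline DMA/MTL settings applied before the hardware is initialised */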
static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &axgbe_recv_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data*/
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

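	/* XPCS registers are reached indirectly: the window base and size
	 * are advertised in PCS_V2_WINDOW_DEF, and a window is selected
	 * through PCS_V2_WINDOW_SELECT before each access.
	 */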
	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
	pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to store MAC addr tbl",
			     RTE_ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
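	/* Locks serializing XPCS, I2C, autonegotiation and PHY access */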
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct axgbe_port),
					     eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}