1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
6 #include "axgbe_rxtx.h"
7 #include "axgbe_ethdev.h"
8 #include "axgbe_common.h"
/* Forward declarations for the PCI init/uninit hooks and the
 * eth_dev_ops callbacks implemented later in this file.
 */
11 static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
12 static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
13 static int axgbe_dev_configure(struct rte_eth_dev *dev);
14 static int axgbe_dev_start(struct rte_eth_dev *dev);
15 static void axgbe_dev_stop(struct rte_eth_dev *dev);
16 static void axgbe_dev_interrupt_handler(void *param);
17 static void axgbe_dev_close(struct rte_eth_dev *dev);
18 static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
19 static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
20 static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
21 static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
22 static int axgbe_dev_link_update(struct rte_eth_dev *dev,
23 int wait_to_complete);
24 static void axgbe_dev_info_get(struct rte_eth_dev *dev,
25 struct rte_eth_dev_info *dev_info);
27 /* The set of PCI devices this driver supports */
28 #define AMD_PCI_VENDOR_ID 0x1022
29 #define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
30 #define AMD_PCI_AXGBE_DEVICE_V2B 0x1459
/* Dynamic log types; registered and given default levels in axgbe_init_log(). */
32 int axgbe_logtype_init;
33 int axgbe_logtype_driver;
/* PCI ID table matched by the PMD: the AMD V2A and V2B AXGBE variants. */
35 static const struct rte_pci_id pci_id_axgbe_map[] = {
36 {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
37 {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
/* Version-specific data for the V2A device: V2 PHY/XPCS access and
 * 229376-byte (224 KB) maximum Tx/Rx FIFOs.
 */
41 static struct axgbe_version_data axgbe_v2a = {
42 .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
43 .xpcs_access = AXGBE_XPCS_ACCESS_V2,
45 .tx_max_fifo_size = 229376,
46 .rx_max_fifo_size = 229376,
47 .tx_tstamp_workaround = 1,
/* Version-specific data for the V2B device: same V2 PHY/XPCS access as
 * V2A but with smaller 65536-byte (64 KB) maximum Tx/Rx FIFOs.
 */
52 static struct axgbe_version_data axgbe_v2b = {
53 .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
54 .xpcs_access = AXGBE_XPCS_ACCESS_V2,
56 .tx_max_fifo_size = 65536,
57 .rx_max_fifo_size = 65536,
58 .tx_tstamp_workaround = 1,
/* Rx descriptor ring limits advertised via dev_infos_get(). */
63 static const struct rte_eth_desc_lim rx_desc_lim = {
64 .nb_max = AXGBE_MAX_RING_DESC,
65 .nb_min = AXGBE_MIN_RING_DESC,
/* Tx descriptor ring limits advertised via dev_infos_get(). */
69 static const struct rte_eth_desc_lim tx_desc_lim = {
70 .nb_max = AXGBE_MAX_RING_DESC,
71 .nb_min = AXGBE_MIN_RING_DESC,
/* ethdev callback table wired into eth_dev->dev_ops in eth_axgbe_dev_init().
 * Queue setup/release handlers live in axgbe_rxtx.c.
 */
75 static const struct eth_dev_ops axgbe_eth_dev_ops = {
76 .dev_configure = axgbe_dev_configure,
77 .dev_start = axgbe_dev_start,
78 .dev_stop = axgbe_dev_stop,
79 .dev_close = axgbe_dev_close,
80 .promiscuous_enable = axgbe_dev_promiscuous_enable,
81 .promiscuous_disable = axgbe_dev_promiscuous_disable,
82 .allmulticast_enable = axgbe_dev_allmulticast_enable,
83 .allmulticast_disable = axgbe_dev_allmulticast_disable,
84 .link_update = axgbe_dev_link_update,
85 .dev_infos_get = axgbe_dev_info_get,
86 .rx_queue_setup = axgbe_dev_rx_queue_setup,
87 .rx_queue_release = axgbe_dev_rx_queue_release,
88 .tx_queue_setup = axgbe_dev_tx_queue_setup,
89 .tx_queue_release = axgbe_dev_tx_queue_release,
/* Reset the PHY: invalidate the cached link speed, then delegate to the
 * version-specific phy_if implementation. Returns phy_reset()'s result.
 */
92 static int axgbe_phy_reset(struct axgbe_port *pdata)
95 pdata->phy_speed = SPEED_UNKNOWN;
96 return pdata->phy_if.phy_reset(pdata);
100 * Interrupt handler triggered by NIC for handling
101 * specific interrupt.
104 * Pointer to interrupt handle.
106 * The address of parameter (struct rte_eth_dev *) registered before.
112 axgbe_dev_interrupt_handler(void *param)
114 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
115 struct axgbe_port *pdata = dev->data->dev_private;
116 unsigned int dma_isr, dma_ch_isr;
/* Service any pending auto-negotiation interrupt first. */
118 pdata->phy_if.an_isr(pdata);
119 /*DMA related interrupts*/
120 dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
/* Read the per-channel status register and write the value back to
 * acknowledge (clear) the serviced bits.
 */
124 AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
127 AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
129 DMA_CH_SR, dma_ch_isr);
132 /* Enable interrupts since disabled after generation*/
133 rte_intr_enable(&pdata->pci_dev->intr_handle);
137 * Configure device link speed and setup link.
138 * It returns 0 on success.
141 axgbe_dev_configure(struct rte_eth_dev *dev)
143 struct axgbe_port *pdata = dev->data->dev_private;
144 /* Checksum offload to hardware */
/* Nonzero when any of the IPv4/UDP/TCP checksum offload bits were requested. */
145 pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
146 DEV_RX_OFFLOAD_CHECKSUM;
/* Record the requested Rx multi-queue mode. Only RSS and "none" are
 * handled here; the rss_enable flag is consumed by the Rx path setup.
 */
151 axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
153 struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
155 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
156 pdata->rss_enable = 1;
157 else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
158 pdata->rss_enable = 0;
/* Start the device: configure Rx MQ mode, reset the PHY, initialise the
 * hardware, enable interrupts, start the PHY, then enable Tx/Rx and clear
 * the STOPPED/DOWN state bits. Error paths log and bail out early.
 */
165 axgbe_dev_start(struct rte_eth_dev *dev)
167 PMD_INIT_FUNC_TRACE();
168 struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
172 ret = axgbe_dev_rx_mq_config(dev);
174 PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
177 ret = axgbe_phy_reset(pdata);
179 PMD_DRV_LOG(ERR, "phy reset failed\n");
182 ret = pdata->hw_if.init(pdata);
184 PMD_DRV_LOG(ERR, "dev_init failed\n");
188 /* enable uio/vfio intr/eventfd mapping */
189 rte_intr_enable(&pdata->pci_dev->intr_handle);
192 pdata->phy_if.phy_start(pdata);
193 axgbe_dev_enable_tx(dev);
194 axgbe_dev_enable_rx(dev);
/* Device is now running: drop the STOPPED and DOWN markers. */
196 axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
197 axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
201 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
203 axgbe_dev_stop(struct rte_eth_dev *dev)
205 PMD_INIT_FUNC_TRACE();
206 struct axgbe_port *pdata = dev->data->dev_private;
208 rte_intr_disable(&pdata->pci_dev->intr_handle);
/* Already stopped: nothing further to do. */
210 if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
213 axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
214 axgbe_dev_disable_tx(dev);
215 axgbe_dev_disable_rx(dev);
/* Stop the PHY and tear down the hardware, then report link down. */
217 pdata->phy_if.phy_stop(pdata);
218 pdata->hw_if.exit(pdata);
219 memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
220 axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
223 /* Clear all resources like TX/RX queues. */
225 axgbe_dev_close(struct rte_eth_dev *dev)
227 axgbe_dev_clear_queues(dev);
/* Enable promiscuous mode: set the PR (Promiscuous) bit in MAC_PFR. */
231 axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
233 PMD_INIT_FUNC_TRACE();
234 struct axgbe_port *pdata = dev->data->dev_private;
236 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
/* Disable promiscuous mode: clear the PR bit in MAC_PFR. */
240 axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
242 PMD_INIT_FUNC_TRACE();
243 struct axgbe_port *pdata = dev->data->dev_private;
245 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
/* Enable all-multicast: set the PM (Pass All Multicast) bit in MAC_PFR,
 * skipping the register write if it is already set.
 */
249 axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
251 PMD_INIT_FUNC_TRACE();
252 struct axgbe_port *pdata = dev->data->dev_private;
254 if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
256 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
/* Disable all-multicast: clear the PM bit in MAC_PFR, skipping the
 * register write if it is already clear.
 */
260 axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
262 PMD_INIT_FUNC_TRACE();
263 struct axgbe_port *pdata = dev->data->dev_private;
265 if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
267 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
270 /* return 0 means link status changed, -1 means not changed */
272 axgbe_dev_link_update(struct rte_eth_dev *dev,
273 int wait_to_complete __rte_unused)
275 struct axgbe_port *pdata = dev->data->dev_private;
276 struct rte_eth_link link;
279 PMD_INIT_FUNC_TRACE();
/* Refresh the PHY status so pdata->phy_link/phy_speed are current. */
282 pdata->phy_if.phy_status(pdata);
/* Build the link descriptor from the cached PHY state. */
284 memset(&link, 0, sizeof(struct rte_eth_link));
285 link.link_duplex = pdata->phy.duplex;
286 link.link_status = pdata->phy_link;
287 link.link_speed = pdata->phy_speed;
/* Autoneg is reported unless a fixed speed was requested by the app. */
288 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
289 ETH_LINK_SPEED_FIXED);
290 ret = rte_eth_linkstatus_set(dev, &link);
292 PMD_DRV_LOG(ERR, "No change in link status\n");
/* Report device capabilities: queue counts, buffer sizes, checksum
 * offloads, RSS capabilities (when the hardware supports RSS), descriptor
 * limits and default queue configuration.
 */
298 axgbe_dev_info_get(struct rte_eth_dev *dev,
299 struct rte_eth_dev_info *dev_info)
301 struct axgbe_port *pdata = dev->data->dev_private;
303 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
304 dev_info->max_rx_queues = pdata->rx_ring_count;
305 dev_info->max_tx_queues = pdata->tx_ring_count;
306 dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
307 dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
308 dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
309 dev_info->speed_capa = ETH_LINK_SPEED_10G;
311 dev_info->rx_offload_capa =
312 DEV_RX_OFFLOAD_IPV4_CKSUM |
313 DEV_RX_OFFLOAD_UDP_CKSUM |
314 DEV_RX_OFFLOAD_TCP_CKSUM;
316 dev_info->tx_offload_capa =
317 DEV_TX_OFFLOAD_IPV4_CKSUM |
318 DEV_TX_OFFLOAD_UDP_CKSUM |
319 DEV_TX_OFFLOAD_TCP_CKSUM;
/* RSS capabilities are only advertised when the feature is present. */
321 if (pdata->hw_feat.rss) {
322 dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
323 dev_info->reta_size = pdata->hw_feat.hash_table_size;
324 dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
327 dev_info->rx_desc_lim = rx_desc_lim;
328 dev_info->tx_desc_lim = tx_desc_lim;
330 dev_info->default_rxconf = (struct rte_eth_rxconf) {
331 .rx_free_thresh = AXGBE_RX_FREE_THRESH,
334 dev_info->default_txconf = (struct rte_eth_txconf) {
335 .tx_free_thresh = AXGBE_TX_FREE_THRESH,
336 .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
337 ETH_TXQ_FLAGS_NOOFFLOADS,
/* Read the three MAC hardware-feature registers and decode every field
 * into pdata->hw_feat, then translate raw encodings (hash table size,
 * DMA address width, zero-based counts, FIFO size exponents) into
 * actual numbers.
 */
341 static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
343 unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
344 struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
346 mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
347 mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
348 mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
350 memset(hw_feat, 0, sizeof(*hw_feat));
352 hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);
354 /* Hardware feature register 0 */
355 hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
356 hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
357 hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
358 hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
359 hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
360 hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
361 hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
362 hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
363 hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
364 hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
365 hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
366 hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
368 hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
369 hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
371 /* Hardware feature register 1 */
372 hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
374 hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
376 hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
377 MAC_HWF1R, ADVTHWORD);
378 hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
379 hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
380 hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
381 hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
382 hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
383 hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
384 hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
385 hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
387 hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
390 /* Hardware feature register 2 */
391 hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
392 hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
393 hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
394 hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
395 hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
396 hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
399 /* Translate the Hash Table size into actual number */
400 switch (hw_feat->hash_table_size) {
404 hw_feat->hash_table_size = 64;
407 hw_feat->hash_table_size = 128;
410 hw_feat->hash_table_size = 256;
414 /* Translate the address width setting into actual number */
415 switch (hw_feat->dma_width) {
417 hw_feat->dma_width = 32;
420 hw_feat->dma_width = 40;
423 hw_feat->dma_width = 48;
426 hw_feat->dma_width = 32;
429 /* The Queue, Channel and TC counts are zero based so increment them
430 * to get the actual number
434 hw_feat->rx_ch_cnt++;
435 hw_feat->tx_ch_cnt++;
438 /* Translate the fifo sizes into actual numbers */
/* Encoded value is an exponent: actual bytes = 2^(encoded + 7). */
439 hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
440 hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
/* Populate the hw/phy/i2c function-pointer tables, then let the
 * version-specific data override the PHY implementation pointers.
 */
443 static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
445 axgbe_init_function_ptrs_dev(&pdata->hw_if);
446 axgbe_init_function_ptrs_phy(&pdata->phy_if);
447 axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
448 pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
/* Derive the Tx/Rx ring and queue counts from the hardware features,
 * clamped to the maximum channel/queue counts read from the property
 * registers (or the hardware values when no maximums were provided).
 */
451 static void axgbe_set_counts(struct axgbe_port *pdata)
453 /* Set all the function pointers */
454 axgbe_init_all_fptrs(pdata);
456 /* Populate the hardware features */
457 axgbe_get_all_hw_features(pdata);
459 /* Set default max values if not provided */
460 if (!pdata->tx_max_channel_count)
461 pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
462 if (!pdata->rx_max_channel_count)
463 pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
465 if (!pdata->tx_max_q_count)
466 pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
467 if (!pdata->rx_max_q_count)
468 pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
470 /* Calculate the number of Tx and Rx rings to be created
471 * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
472 * the number of Tx queues to the number of Tx channels
474 * -Rx (DMA) Channels do not map 1-to-1 so use the actual
475 * number of Rx queues or maximum allowed
477 pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
478 pdata->tx_max_channel_count);
479 pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
480 pdata->tx_max_q_count);
482 pdata->tx_q_count = pdata->tx_ring_count;
484 pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
485 pdata->rx_max_channel_count);
487 pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
488 pdata->rx_max_q_count);
/* Fill in the default DMA/MTL configuration: x8 PBL, 32-beat bursts,
 * store-and-forward on both directions with 64-byte thresholds, OSP
 * enabled, pause autoneg on, unknown link speed and power-up state.
 */
491 static void axgbe_default_config(struct axgbe_port *pdata)
493 pdata->pblx8 = DMA_PBL_X8_ENABLE;
494 pdata->tx_sf_mode = MTL_TSF_ENABLE;
495 pdata->tx_threshold = MTL_TX_THRESHOLD_64;
496 pdata->tx_pbl = DMA_PBL_32;
497 pdata->tx_osp_mode = DMA_OSP_ENABLE;
498 pdata->rx_sf_mode = MTL_RSF_ENABLE;
499 pdata->rx_threshold = MTL_RX_THRESHOLD_64;
500 pdata->rx_pbl = DMA_PBL_32;
501 pdata->pause_autoneg = 1;
504 pdata->phy_speed = SPEED_UNKNOWN;
505 pdata->power_down = 0;
509 * It returns 0 on success.
512 eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
514 PMD_INIT_FUNC_TRACE();
515 struct axgbe_port *pdata;
516 struct rte_pci_device *pci_dev;
517 uint32_t reg, mac_lo, mac_hi;
520 eth_dev->dev_ops = &axgbe_eth_dev_ops;
521 eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
524 * For secondary processes, we don't initialise any further as primary
525 * has already done this work.
527 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
530 pdata = (struct axgbe_port *)eth_dev->data->dev_private;
/* Start in the DOWN + STOPPED state until dev_start() runs. */
532 axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
533 axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
534 pdata->eth_dev = eth_dev;
536 pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
537 pdata->pci_dev = pci_dev;
/* Map the register regions from the PCI BARs. */
540 (uint64_t)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
541 pdata->xprop_regs = pdata->xgmac_regs + AXGBE_MAC_PROP_OFFSET;
542 pdata->xi2c_regs = pdata->xgmac_regs + AXGBE_I2C_CTRL_OFFSET;
543 pdata->xpcs_regs = (uint64_t)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
545 /* version specific driver data*/
546 if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
547 pdata->vdata = &axgbe_v2a;
549 pdata->vdata = &axgbe_v2b;
551 /* Configure the PCS indirect addressing support */
552 reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
553 pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
554 pdata->xpcs_window <<= 6;
555 pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
/* Window size field is an exponent: actual size = 2^(field + 7). */
556 pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
557 pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
558 pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
559 pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
561 "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
562 pdata->xpcs_window_size, pdata->xpcs_window_mask);
563 XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
565 /* Retrieve the MAC address */
566 mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
567 mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
568 pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
569 pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
570 pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
571 pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
572 pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
573 pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
575 eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
577 if (!eth_dev->data->mac_addrs) {
579 "Failed to alloc %u bytes needed to store MAC addr tbl",
/* Fall back to a random MAC when the hardware one is invalid. */
584 if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
585 eth_random_addr(pdata->mac_addr.addr_bytes);
587 /* Copy the permanent MAC address */
/* NOTE(review): "ð_dev" below looks like mojibake for "&eth_dev"
 * (HTML entity &eth; decoded) — verify against the upstream source.
 */
588 ether_addr_copy(&pdata->mac_addr, ð_dev->data->mac_addrs[0]);
591 pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
592 pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
594 /* Set the DMA coherency values */
596 pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
597 pdata->arcache = AXGBE_DMA_OS_ARCACHE;
598 pdata->awcache = AXGBE_DMA_OS_AWCACHE;
600 /* Set the maximum channels and queues */
601 reg = XP_IOREAD(pdata, XP_PROP_1);
602 pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
603 pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
604 pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
605 pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
607 /* Set the hardware channel and queue counts */
608 axgbe_set_counts(pdata);
610 /* Set the maximum fifo amounts */
611 reg = XP_IOREAD(pdata, XP_PROP_2);
/* FIFO sizes are reported in 16 KB units; clamp to the version limit. */
612 pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
613 pdata->tx_max_fifo_size *= 16384;
614 pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
615 pdata->vdata->tx_max_fifo_size);
616 pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
617 pdata->rx_max_fifo_size *= 16384;
618 pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
619 pdata->vdata->rx_max_fifo_size);
620 /* Issue software reset to DMA */
621 ret = pdata->hw_if.exit(pdata);
623 PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");
625 /* Set default configuration data */
626 axgbe_default_config(pdata);
628 /* Set default max values if not provided */
629 if (!pdata->tx_max_fifo_size)
630 pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
631 if (!pdata->rx_max_fifo_size)
632 pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
634 pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
635 pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
636 pthread_mutex_init(&pdata->xpcs_mutex, NULL);
637 pthread_mutex_init(&pdata->i2c_mutex, NULL);
638 pthread_mutex_init(&pdata->an_mutex, NULL);
639 pthread_mutex_init(&pdata->phy_mutex, NULL);
/* PHY init failure frees the MAC table allocated above. */
641 ret = pdata->phy_if.phy_init(pdata);
643 rte_free(eth_dev->data->mac_addrs);
647 rte_intr_callback_register(&pci_dev->intr_handle,
648 axgbe_dev_interrupt_handler,
650 PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
651 eth_dev->data->port_id, pci_dev->id.vendor_id,
652 pci_dev->id.device_id);
/* Undo eth_axgbe_dev_init() for the primary process: free the MAC table,
 * clear the burst/ops pointers, release the queues and unregister the
 * interrupt callback.
 */
658 eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
660 struct rte_pci_device *pci_dev;
662 PMD_INIT_FUNC_TRACE();
/* Secondary processes did not initialise anything; nothing to undo. */
664 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
667 pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
669 rte_free(eth_dev->data->mac_addrs);
670 eth_dev->data->mac_addrs = NULL;
671 eth_dev->dev_ops = NULL;
672 eth_dev->rx_pkt_burst = NULL;
673 eth_dev->tx_pkt_burst = NULL;
674 axgbe_dev_clear_queues(eth_dev);
676 /* disable uio intr before callback unregister */
677 rte_intr_disable(&pci_dev->intr_handle);
678 rte_intr_callback_unregister(&pci_dev->intr_handle,
679 axgbe_dev_interrupt_handler,
/* PCI probe hook: allocate an ethdev with an axgbe_port private area and
 * run eth_axgbe_dev_init() on it.
 */
685 static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
686 struct rte_pci_device *pci_dev)
688 return rte_eth_dev_pci_generic_probe(pci_dev,
689 sizeof(struct axgbe_port), eth_axgbe_dev_init);
/* PCI remove hook: tear the ethdev down via eth_axgbe_dev_uninit(). */
692 static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
694 return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
/* PCI driver descriptor registered with the EAL below. */
697 static struct rte_pci_driver rte_axgbe_pmd = {
698 .id_table = pci_id_axgbe_map,
699 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
700 .probe = eth_axgbe_pci_probe,
701 .remove = eth_axgbe_pci_remove,
/* Register the PMD, its PCI ID table and the kernel modules it depends on. */
704 RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
705 RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
706 RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
/* Constructor: register the init and driver log types and default both
 * to NOTICE level.
 */
708 RTE_INIT(axgbe_init_log);
712 axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
713 if (axgbe_logtype_init >= 0)
714 rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
715 axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
716 if (axgbe_logtype_driver >= 0)
717 rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);