/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */
#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"	/* for axgbe_init_function_ptrs_phy_v2() */
static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static void axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);
/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID	0x1022
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;
static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};
static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.tx_max_fifo_size = 229376,
	.rx_max_fifo_size = 229376,
	.tx_tstamp_workaround = 1,
};
static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
};
static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure    = axgbe_dev_configure,
	.dev_start        = axgbe_dev_start,
	.dev_stop         = axgbe_dev_stop,
	.dev_close        = axgbe_dev_close,
	.link_update      = axgbe_dev_link_update,
	.dev_infos_get    = axgbe_dev_info_get,
	.rx_queue_setup   = axgbe_dev_rx_queue_setup,
	.rx_queue_release = axgbe_dev_rx_queue_release,
	.tx_queue_setup   = axgbe_dev_tx_queue_setup,
	.tx_queue_release = axgbe_dev_tx_queue_release,
};
static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}
/*
 * Interrupt handler triggered by the NIC for handling a specific interrupt.
 *
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	if (dma_isr) {
		/* Ack the channel 0 status bits by writing them back */
		dma_ch_isr = AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
					       dev->data->rx_queues[0],
					       DMA_CH_SR);
		AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
				   dev->data->rx_queues[0],
				   DMA_CH_SR, dma_ch_isr);
	}
	/* Enable interrupts since disabled after generation */
	rte_intr_enable(&pdata->pci_dev->intr_handle);
}
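/*
 * Note: this handler is registered against the PCI interrupt handle in
 * eth_axgbe_dev_init() via rte_intr_callback_register() and unregistered
 * again in eth_axgbe_dev_uninit().
 */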
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}
static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}
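/*
 * Usage sketch (application side, not part of this driver): the offload and
 * multi-queue settings consumed by axgbe_dev_configure() and
 * axgbe_dev_rx_mq_config() above come from the rte_eth_conf passed to
 * rte_eth_dev_configure(). Field names follow the pre-18.11 offload API used
 * throughout this file; port_id and the queue counts are placeholders.
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.mq_mode = ETH_MQ_RX_RSS,
 *			.offloads = DEV_RX_OFFLOAD_CHECKSUM,
 *		},
 *	};
 *	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */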
static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
	int ret;

	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}
	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	return 0;
}
/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = dev->data->dev_private;

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}
/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}
/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	/* rte_eth_linkstatus_set() returns -1 when the status is unchanged */
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}
static void
axgbe_dev_info_get(struct rte_eth_dev *dev,
		   struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
}
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}
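/*
 * Worked example for the FIFO decode above: a raw RXFIFOSIZE/TXFIFOSIZE
 * field of 9 translates to 1 << (9 + 7) = 65536 bytes (64KB) of FIFO.
 */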
static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);

	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}
static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}
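/*
 * Example (hypothetical values): with hw_feat.tx_ch_cnt = 4,
 * tx_max_channel_count = 4 and tx_max_q_count = 8, tx_ring_count =
 * min(min(4, 4), 8) = 4, so four Tx rings are created and tx_q_count = 4.
 */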
static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}
/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &axgbe_recv_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	pdata->xgmac_regs =
		(uint64_t)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = pdata->xgmac_regs + AXGBE_MAC_PROP_OFFSET;
	pdata->xi2c_regs = pdata->xgmac_regs + AXGBE_I2C_CTRL_OFFSET;
	pdata->xpcs_regs = (uint64_t)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
	pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
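	/*
	 * Worked example for the indirect window math above: a raw SIZE
	 * field of 5 gives a 1 << (5 + 7) = 4096-byte window with mask
	 * 0xfff, so an XPCS access at offset O selects the window containing
	 * O & ~0xfff and then accesses O & 0xfff within it.
	 */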
	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
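	/*
	 * Example: mac_lo = 0xddccbbaa and mac_hi = 0x0000ffee decode to the
	 * MAC address aa:bb:cc:dd:ee:ff (the low register holds the first
	 * four octets, least significant byte first).
	 */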
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
					       ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to store MAC addr tbl",
			     ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
		eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
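	/*
	 * Example: XP_PROP_2.TX_FIFO_SIZE is in 16KB units, so a raw value
	 * of 14 means 14 * 16384 = 229376 bytes, which is exactly the
	 * tx_max_fifo_size cap in axgbe_v2a above.
	 */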
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;

	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}
static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	/* Free the MAC address table */
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}
static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
			sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}
static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_INIT(axgbe_init_log);
static void
axgbe_init_log(void)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}
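/*
 * These log types default to NOTICE; more verbose driver output can be
 * requested at startup through the EAL --log-level option, e.g.
 * --log-level=pmd.net.axgbe.driver,8 (exact syntax varies by DPDK release).
 */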