/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static void axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID	0x1022
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, }, /* sentinel */
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.tx_max_fifo_size = 229376,
	.rx_max_fifo_size = 229376,
	.tx_tstamp_workaround = 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure = axgbe_dev_configure,
	.dev_start = axgbe_dev_start,
	.dev_stop = axgbe_dev_stop,
	.dev_close = axgbe_dev_close,
	.dev_infos_get = axgbe_dev_info_get,
	.rx_queue_setup = axgbe_dev_rx_queue_setup,
	.rx_queue_release = axgbe_dev_rx_queue_release,
	.tx_queue_setup = axgbe_dev_tx_queue_setup,
	.tx_queue_release = axgbe_dev_tx_queue_release,
};

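/* Reset the PHY and mark the link speed as unknown until autonegotiation
 * completes again.
 */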
static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by the NIC for handling
 * specific interrupts.
 *
 * @param param
 *  The address of the (struct rte_eth_dev *) parameter registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  dev->data->rx_queues[0],
						  DMA_CH_SR);
			/* Acknowledge by writing the status bits back */
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   dev->data->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Enable interrupts since disabled after generation */
	rte_intr_enable(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

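/* Map the requested RX multiqueue mode onto the internal RSS flag;
 * modes other than RSS and "none" are rejected.
 */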
static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

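/* Start device: configure RX multiqueue, reset the PHY, initialise the
 * hardware, then enable interrupts and the TX/RX paths.
 */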
static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
	int ret;

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = dev->data->dev_private;

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static void
axgbe_dev_info_get(struct rte_eth_dev *dev,
		   struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
				ETH_TXQ_FLAGS_NOOFFLOADS,
	};
}

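/* Read the three hardware feature registers and decode them into the
 * hw_feat structure for later capability checks.
 */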
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

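/* Install the dev/phy/i2c function pointer tables; the version-specific
 * data then overrides the PHY implementation.
 */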
static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);

	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

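/* Derive the TX/RX ring and queue counts from the hardware features,
 * clamped to the caller-provided maximums.
 */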
static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

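/* Default DMA burst, MTL threshold and flow-control settings applied
 * before the hardware is brought up.
 */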
static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

/*
 * Initialise the port hardware and driver private data.
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &axgbe_recv_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	pdata->xgmac_regs =
		(uint64_t)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = pdata->xgmac_regs + AXGBE_MAC_PROP_OFFSET;
	pdata->xi2c_regs = pdata->xgmac_regs + AXGBE_I2C_CTRL_OFFSET;
	pdata->xpcs_regs = (uint64_t)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
	pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
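	/* The 48-bit MAC address is split across the two registers: the low
	 * 32 bits come from XP_MAC_ADDR_LO, the upper 16 bits from
	 * XP_MAC_ADDR_HI.
	 */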
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
					       ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to store MAC addr tbl",
			     ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
		eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

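	/* Clock settings */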
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);

	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;

	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

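/* Tear down the port: free the MAC address table, clear the queues and
 * unregister the interrupt callback. Only the primary process does this.
 */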
static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct axgbe_port),
					     eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

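/* Register the driver log types and default their level at load time. */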
RTE_INIT(axgbe_init_log);
static void
axgbe_init_log(void)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}