/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr,
				  uint32_t index,
				  uint32_t pool);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
			      struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats,
				unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
			   const uint64_t *ids,
			   uint64_t *values,
			   unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);

struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};

#define AXGMAC_MMC_STAT(_string, _var)				\
	{ _string,						\
	  offsetof(struct axgbe_mmc_stats, _var),		\
	}
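
/* Each entry maps an xstats name to the byte offset of its backing counter
 * inside struct axgbe_mmc_stats, so the xstats handlers can fetch any
 * statistic generically:
 *   *(u64 *)((uint8_t *)&pdata->mmc_stats + axgbe_xstats_strings[i].offset)
 */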
static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID		0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A	0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B	0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.an_cdr_workaround		= 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.an_cdr_workaround		= 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set         = axgbe_dev_mac_addr_set,
	.mac_addr_add         = axgbe_dev_mac_addr_add,
	.mac_addr_remove      = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list     = axgbe_dev_set_mc_addr_list,
	.link_update          = axgbe_dev_link_update,
	.get_reg              = axgbe_dev_get_regs,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.xstats_get           = axgbe_dev_xstats_get,
	.xstats_reset         = axgbe_dev_xstats_reset,
	.xstats_get_names     = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
};

static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}
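
/* Note: DEV_RX_OFFLOAD_CHECKSUM, tested above, is the OR of the IPv4, UDP
 * and TCP Rx checksum offload flags, so rx_csum_enable is non-zero when any
 * of the three is requested.
 */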

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}

static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Set Default MAC Addr */
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}

static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		       uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}

static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}

static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1; /* 0 is always default mac */
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
		return -EINVAL;
	}

	/* clear unicast addresses */
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}
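
/* axgbe_read_mmc_stats() accumulates the hardware MMC counters into the
 * 64-bit software copies in pdata->mmc_stats.  Counters wider than 32 bits
 * are exposed as _LO/_HI register pairs, so each read adds LO plus HI
 * shifted up by 32.  MMC_CR.MCF freezes the counters for the duration of
 * the read so the two halves stay consistent.
 */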
static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}

static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}

static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}
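
/* Following the ethdev xstats convention, a NULL ids array means "all
 * statistics": the first branch below fills values[] for every counter,
 * while the id-based path resolves each requested id against a full
 * snapshot obtained through the recursive NULL-ids call.
 */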
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}

	return n;
}

static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}

static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}

static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
		stats->ierrors += rxq->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	return 0;
}

static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->rx_mbuf_alloc_failed = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}

static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}
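
/* MAC_HWF0R/1R/2R are read-only hardware feature registers.  They are
 * decoded once here into pdata->hw_feat so the rest of the driver can
 * test capabilities without touching the registers again.
 */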
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
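	/* i.e. an encoded value n corresponds to 1 << (n + 7) bytes:
	 * 0 -> 128 bytes, 9 -> 64 KB.
	 */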
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}

/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;

	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
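	/* Example: a SIZE field of 5 yields a window of 1 << (5 + 7) = 4 KB
	 * and a mask of 0xfff; OFFSET is expressed in 64-byte units, hence
	 * the shift left by 6 above.
	 */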
1200 "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
1201 pdata->xpcs_window_size, pdata->xpcs_window_mask);
1202 XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
1204 /* Retrieve the MAC address */
1205 mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
1206 mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
1207 pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
1208 pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
1209 pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
1210 pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
1211 pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
1212 pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
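	/* The 48-bit MAC address is packed LSB-first across the 32-bit
	 * XP_MAC_ADDR_LO/HI port-property registers, hence the byte-wise
	 * shifts and masks above.
	 */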

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
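	/* XP_PROP_1 advertises the per-port DMA channel and queue limits;
	 * axgbe_set_counts() below clamps the ring counts against both these
	 * limits and the counts read from the MAC hardware feature registers.
	 */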

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct axgbe_port),
					     eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}