/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"
#include "axgbe_regs.h"
static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr,
				  uint32_t index,
				  uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
				       struct rte_ether_addr *mac_addr,
				       uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
					   uint8_t add);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
			      struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats,
				unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
			   const uint64_t *ids,
			   uint64_t *values,
			   unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_info_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};
#define AXGMAC_MMC_STAT(_string, _var)                          \
	{ _string,                                              \
	  offsetof(struct axgbe_mmc_stats, _var),               \
	}
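/* Each xstat is described by a display name plus the byte offset of the
 * matching counter inside struct axgbe_mmc_stats; the xstats handlers
 * below fetch a value generically as
 *   *(u64 *)((uint8_t *)&pdata->mmc_stats + entry->offset)
 * so adding a statistic only requires a new AXGMAC_MMC_STAT() entry here.
 */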
static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};

#define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings)
/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID		0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID	0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A	0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B	0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};
static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};
static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set         = axgbe_dev_mac_addr_set,
	.mac_addr_add         = axgbe_dev_mac_addr_add,
	.mac_addr_remove      = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list     = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set    = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update          = axgbe_dev_link_update,
	.get_reg              = axgbe_dev_get_regs,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.xstats_get           = axgbe_dev_xstats_get,
	.xstats_reset         = axgbe_dev_xstats_reset,
	.xstats_get_names     = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
};
static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}
/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  dev->data->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   dev->data->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}
/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}
static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}
static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
	    max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/* Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}
/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}
/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}
static int
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);

	return 0;
}

static int
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);

	return 0;
}
static int
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);

	return 0;
}

static int
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return 0;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);

	return 0;
}
static int
axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Set Default MAC Addr */
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);

	return 0;
}
static int
axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		       uint32_t index, uint32_t pool __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return -EINVAL;
	}
	axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
	return 0;
}
static void
axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (index > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
		return;
	}
	axgbe_set_mac_addn_addr(pdata, NULL, index);
}
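/* The additional MAC address slots are shared between the unicast entries
 * managed by mac_addr_add/remove above and the multicast list set below:
 * slot 0 always holds the default MAC, so the multicast list is written
 * starting at slot 1 after every non-default slot has been cleared.
 */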
static int
axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			   struct rte_ether_addr *mc_addr_set,
			   uint32_t nb_mc_addr)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index = 1; /* 0 is always default mac */
	uint32_t i;

	if (nb_mc_addr > hw_feat->addn_mac) {
		PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
		return -EINVAL;
	}

	/* clear unicast addresses */
	for (i = 1; i < hw_feat->addn_mac; i++) {
		if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
			continue;
		memset(&dev->data->mac_addrs[i], 0,
		       sizeof(struct rte_ether_addr));
	}

	while (nb_mc_addr--)
		axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);

	return 0;
}
static int
axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
			    struct rte_ether_addr *mac_addr, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);

	if (pdata->uc_hash_mac_addr > 0) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}
static int
axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
	uint32_t index;

	if (!hw_feat->hash_table_size) {
		PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
		return -ENOTSUP;
	}

	for (index = 0; index < pdata->hash_table_count; index++) {
		if (add)
			pdata->uc_hash_table[index] = ~0;
		else
			pdata->uc_hash_table[index] = 0;

		PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
			    add ? "set" : "clear", index);

		AXGMAC_IOWRITE(pdata, MAC_HTR(index),
			       pdata->uc_hash_table[index]);
	}

	if (add) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
	} else {
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
		AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
	}
	return 0;
}
/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}
static int
axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	if (regs->data == NULL) {
		regs->length = axgbe_regs_get_count(pdata);
		regs->width = sizeof(uint32_t);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length &&
	    regs->length != (uint32_t)axgbe_regs_get_count(pdata))
		return -ENOTSUP;

	regs->version = pdata->pci_dev->id.vendor_id << 16 |
			pdata->pci_dev->id.device_id;
	axgbe_regs_dump(pdata, regs->data);
	return 0;
}
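/* MMC counters wider than 32 bits are exposed by the hardware as LO/HI
 * register pairs, so each read below accumulates the LO word and then the
 * HI word shifted up by 32. The counters are frozen (MMC_CR.MCF) for the
 * duration of the dump so the two halves of each pair stay consistent.
 */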
static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
		((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}
static int
axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
		     unsigned int n)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	if (!stats)
		return 0;

	axgbe_read_mmc_stats(pdata);

	for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
		stats[i].id = i;
		stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
				axgbe_xstats_strings[i].offset);
	}

	return i;
}
static int
axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int n)
{
	unsigned int i;

	if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
		for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
			snprintf(xstats_names[i].name,
				 RTE_ETH_XSTATS_NAME_SIZE, "%s",
				 axgbe_xstats_strings[i].name);
		}
	}

	return AXGBE_XSTATS_COUNT;
}
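/* Per the ethdev xstats-by-id contract, a NULL ids array means "return
 * everything": the function below then dumps all AXGBE_XSTATS_COUNT
 * counters (or reports the required array size when n is too small).
 * With ids supplied, it first snapshots every counter through that NULL
 * path and then copies out only the requested entries.
 */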
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
			   uint64_t *values, unsigned int n)
{
	unsigned int i;
	uint64_t values_copy[AXGBE_XSTATS_COUNT];

	if (!ids) {
		struct axgbe_port *pdata = dev->data->dev_private;

		if (n < AXGBE_XSTATS_COUNT)
			return AXGBE_XSTATS_COUNT;

		axgbe_read_mmc_stats(pdata);

		for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
			values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
					axgbe_xstats_strings[i].offset);
		}

		return i;
	}

	axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);

	for (i = 0; i < n; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		values[i] = values_copy[ids[i]];
	}

	return n;
}
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size)
{
	struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
	unsigned int i;

	if (!ids)
		return axgbe_dev_xstats_get_names(dev, xstats_names, size);

	axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);

	for (i = 0; i < size; i++) {
		if (ids[i] >= AXGBE_XSTATS_COUNT) {
			PMD_DRV_LOG(ERR, "id value isn't valid\n");
			return -1;
		}
		strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
	}
	return size;
}
static int
axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* MMC registers are configured for reset on read */
	axgbe_read_mmc_stats(pdata);

	/* Reset stats */
	memset(stats, 0, sizeof(*stats));

	return 0;
}
static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
	unsigned int i;

	axgbe_read_mmc_stats(pdata);

	stats->imissed = mmc_stats->rxfifooverflow;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
		stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
		stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
		stats->ierrors += rxq->errors;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
		stats->oerrors += txq->errors;
	}

	return 0;
}
static int
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
		rxq->rx_mbuf_alloc_failed = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}

	return 0;
}
static int
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
	dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_JUMBO_FRAME |
		DEV_RX_OFFLOAD_SCATTER |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};

	return 0;
}
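/* The MAC advertises its capabilities through three hardware feature
 * registers (MAC_HWF0R..MAC_HWF2R). Several fields are encoded rather
 * than literal: queue/channel/TC counts are zero-based, FIFO sizes are
 * log2-encoded and the hash-table-size and DMA-width fields are enum
 * selectors, so the raw values are translated at the end of this probe.
 */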
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}
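/* The generic dev/phy/i2c function pointers are installed first; the
 * version-specific data then overrides the PHY implementation hooks
 * (axgbe_init_function_ptrs_phy_v2 for both the V2A and V2B parts), so
 * the ordering of the calls below matters.
 */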
static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}
static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}
static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}
static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}
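/* Walk the PCI bus looking for a given AMD device ID; used by dev_init
 * below to detect the RV root complex and pick the matching PCS window
 * registers. pci_device_cmp() follows the rte_bus find_device()
 * convention of returning 0 on a match.
 */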
static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}
/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	int ret;
	int len;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data*/
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;
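
	/*
	 * The PCS register space is reached through an indirect window:
	 * the window definition register encodes the window base offset
	 * (stored here shifted left by 6) and a log2-style size
	 * (1 << (SIZE + 7)); the derived mask is what the XPCS read/write
	 * helpers are expected to use when paging through the PCS space
	 * via the window select register.
	 */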
	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}
	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);
	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}
static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}
static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
					     sizeof(struct axgbe_port),
					     eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}
static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}