1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
6 #include "axgbe_rxtx.h"
7 #include "axgbe_ethdev.h"
8 #include "axgbe_common.h"
10 #include "axgbe_regs.h"
13 static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
14 static int axgbe_dev_configure(struct rte_eth_dev *dev);
15 static int axgbe_dev_start(struct rte_eth_dev *dev);
16 static int axgbe_dev_stop(struct rte_eth_dev *dev);
17 static void axgbe_dev_interrupt_handler(void *param);
18 static int axgbe_dev_close(struct rte_eth_dev *dev);
19 static int axgbe_dev_reset(struct rte_eth_dev *dev);
20 static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
21 static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
22 static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
23 static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
24 static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
25 struct rte_ether_addr *mac_addr);
26 static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
27 struct rte_ether_addr *mac_addr,
30 static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
31 static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
32 struct rte_ether_addr *mc_addr_set,
34 static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
35 struct rte_ether_addr *mac_addr,
37 static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
39 static int axgbe_dev_link_update(struct rte_eth_dev *dev,
40 int wait_to_complete);
41 static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
42 struct rte_dev_reg_info *regs);
43 static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
44 struct rte_eth_stats *stats);
45 static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
46 static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
47 struct rte_eth_xstat *stats,
50 axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
51 struct rte_eth_xstat_name *xstats_names,
54 axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
59 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
61 struct rte_eth_xstat_name *xstats_names,
63 static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
64 static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
65 struct rte_eth_rss_reta_entry64 *reta_conf,
67 static int axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
68 struct rte_eth_rss_reta_entry64 *reta_conf,
70 static int axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
71 struct rte_eth_rss_conf *rss_conf);
72 static int axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
73 struct rte_eth_rss_conf *rss_conf);
74 static int axgbe_dev_info_get(struct rte_eth_dev *dev,
75 struct rte_eth_dev_info *dev_info);
76 static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
77 struct rte_eth_fc_conf *fc_conf);
78 static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
79 struct rte_eth_fc_conf *fc_conf);
80 static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
81 struct rte_eth_pfc_conf *pfc_conf);
82 static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
83 struct rte_eth_rxq_info *qinfo);
84 static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
85 struct rte_eth_txq_info *qinfo);
86 const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
87 static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
90 axgbe_timesync_enable(struct rte_eth_dev *dev);
92 axgbe_timesync_disable(struct rte_eth_dev *dev);
94 axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
95 struct timespec *timestamp, uint32_t flags);
97 axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
98 struct timespec *timestamp);
100 axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
102 axgbe_timesync_read_time(struct rte_eth_dev *dev,
103 struct timespec *timestamp);
105 axgbe_timesync_write_time(struct rte_eth_dev *dev,
106 const struct timespec *timestamp);
108 axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
111 axgbe_update_tstamp_addend(struct axgbe_port *pdata,
112 unsigned int addend);
114 axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on);
115 static int axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
116 enum rte_vlan_type vlan_type, uint16_t tpid);
117 static int axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
119 struct axgbe_xstats {
120 char name[RTE_ETH_XSTATS_NAME_SIZE];
124 #define AXGMAC_MMC_STAT(_string, _var) \
126 offsetof(struct axgbe_mmc_stats, _var), \
129 static const struct axgbe_xstats axgbe_xstats_strings[] = {
130 AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
131 AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
132 AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
133 AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
134 AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
135 AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
136 AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
137 AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
138 AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
139 AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
140 AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
141 AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
142 AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
143 AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
145 AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
146 AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
147 AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
148 AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
149 AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
150 AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
151 AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
152 AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
153 AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
154 AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
155 AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
156 AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
157 AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
158 AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
159 AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
160 AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
161 AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
162 AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
163 AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
164 AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
165 AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
166 AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
169 #define AXGBE_XSTATS_COUNT ARRAY_SIZE(axgbe_xstats_strings)
171 /* The set of PCI devices this driver supports */
172 #define AMD_PCI_VENDOR_ID 0x1022
173 #define AMD_PCI_RV_ROOT_COMPLEX_ID 0x15d0
174 #define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
175 #define AMD_PCI_AXGBE_DEVICE_V2B 0x1459
177 static const struct rte_pci_id pci_id_axgbe_map[] = {
178 {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
179 {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
183 static struct axgbe_version_data axgbe_v2a = {
184 .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
185 .xpcs_access = AXGBE_XPCS_ACCESS_V2,
187 .tx_max_fifo_size = 229376,
188 .rx_max_fifo_size = 229376,
189 .tx_tstamp_workaround = 1,
192 .an_cdr_workaround = 1,
195 static struct axgbe_version_data axgbe_v2b = {
196 .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
197 .xpcs_access = AXGBE_XPCS_ACCESS_V2,
199 .tx_max_fifo_size = 65536,
200 .rx_max_fifo_size = 65536,
201 .tx_tstamp_workaround = 1,
204 .an_cdr_workaround = 1,
207 static const struct rte_eth_desc_lim rx_desc_lim = {
208 .nb_max = AXGBE_MAX_RING_DESC,
209 .nb_min = AXGBE_MIN_RING_DESC,
213 static const struct rte_eth_desc_lim tx_desc_lim = {
214 .nb_max = AXGBE_MAX_RING_DESC,
215 .nb_min = AXGBE_MIN_RING_DESC,
219 static const struct eth_dev_ops axgbe_eth_dev_ops = {
220 .dev_configure = axgbe_dev_configure,
221 .dev_start = axgbe_dev_start,
222 .dev_stop = axgbe_dev_stop,
223 .dev_close = axgbe_dev_close,
224 .dev_reset = axgbe_dev_reset,
225 .promiscuous_enable = axgbe_dev_promiscuous_enable,
226 .promiscuous_disable = axgbe_dev_promiscuous_disable,
227 .allmulticast_enable = axgbe_dev_allmulticast_enable,
228 .allmulticast_disable = axgbe_dev_allmulticast_disable,
229 .mac_addr_set = axgbe_dev_mac_addr_set,
230 .mac_addr_add = axgbe_dev_mac_addr_add,
231 .mac_addr_remove = axgbe_dev_mac_addr_remove,
232 .set_mc_addr_list = axgbe_dev_set_mc_addr_list,
233 .uc_hash_table_set = axgbe_dev_uc_hash_table_set,
234 .uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
235 .link_update = axgbe_dev_link_update,
236 .get_reg = axgbe_dev_get_regs,
237 .stats_get = axgbe_dev_stats_get,
238 .stats_reset = axgbe_dev_stats_reset,
239 .xstats_get = axgbe_dev_xstats_get,
240 .xstats_reset = axgbe_dev_xstats_reset,
241 .xstats_get_names = axgbe_dev_xstats_get_names,
242 .xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
243 .xstats_get_by_id = axgbe_dev_xstats_get_by_id,
244 .reta_update = axgbe_dev_rss_reta_update,
245 .reta_query = axgbe_dev_rss_reta_query,
246 .rss_hash_update = axgbe_dev_rss_hash_update,
247 .rss_hash_conf_get = axgbe_dev_rss_hash_conf_get,
248 .dev_infos_get = axgbe_dev_info_get,
249 .rx_queue_setup = axgbe_dev_rx_queue_setup,
250 .rx_queue_release = axgbe_dev_rx_queue_release,
251 .tx_queue_setup = axgbe_dev_tx_queue_setup,
252 .tx_queue_release = axgbe_dev_tx_queue_release,
253 .flow_ctrl_get = axgbe_flow_ctrl_get,
254 .flow_ctrl_set = axgbe_flow_ctrl_set,
255 .priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
256 .rxq_info_get = axgbe_rxq_info_get,
257 .txq_info_get = axgbe_txq_info_get,
258 .dev_supported_ptypes_get = axgbe_dev_supported_ptypes_get,
259 .mtu_set = axgb_mtu_set,
260 .vlan_filter_set = axgbe_vlan_filter_set,
261 .vlan_tpid_set = axgbe_vlan_tpid_set,
262 .vlan_offload_set = axgbe_vlan_offload_set,
263 .timesync_enable = axgbe_timesync_enable,
264 .timesync_disable = axgbe_timesync_disable,
265 .timesync_read_rx_timestamp = axgbe_timesync_read_rx_timestamp,
266 .timesync_read_tx_timestamp = axgbe_timesync_read_tx_timestamp,
267 .timesync_adjust_time = axgbe_timesync_adjust_time,
268 .timesync_read_time = axgbe_timesync_read_time,
269 .timesync_write_time = axgbe_timesync_write_time,
270 .fw_version_get = axgbe_dev_fw_version_get,
273 static int axgbe_phy_reset(struct axgbe_port *pdata)
275 pdata->phy_link = -1;
276 pdata->phy_speed = SPEED_UNKNOWN;
277 return pdata->phy_if.phy_reset(pdata);
281 * Interrupt handler triggered by the NIC for handling a
282 * specific interrupt.
285 * Pointer to interrupt handle.
287 * The address of parameter (struct rte_eth_dev *) registered before.
293 axgbe_dev_interrupt_handler(void *param)
295 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
296 struct axgbe_port *pdata = dev->data->dev_private;
297 unsigned int dma_isr, dma_ch_isr;
299 pdata->phy_if.an_isr(pdata);
300 /* DMA related interrupts */
301 dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
302 PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
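/* Read the channel 0 DMA status and write it back to DMA_CH_SR to
 * acknowledge (clear) the serviced interrupt bits.
 */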
306 AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
309 PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
310 AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
312 DMA_CH_SR, dma_ch_isr);
315 /* Unmask interrupts since they are disabled after generation */
316 rte_intr_ack(&pdata->pci_dev->intr_handle);
320 * Configure device link speed and set up the link.
321 * It returns 0 on success.
324 axgbe_dev_configure(struct rte_eth_dev *dev)
326 struct axgbe_port *pdata = dev->data->dev_private;
327 /* Checksum offload to hardware */
328 pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
329 DEV_RX_OFFLOAD_CHECKSUM;
334 axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
336 struct axgbe_port *pdata = dev->data->dev_private;
338 if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
339 pdata->rss_enable = 1;
340 else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
341 pdata->rss_enable = 0;
348 axgbe_dev_start(struct rte_eth_dev *dev)
350 struct axgbe_port *pdata = dev->data->dev_private;
352 struct rte_eth_dev_data *dev_data = dev->data;
353 uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
355 dev->dev_ops = &axgbe_eth_dev_ops;
357 PMD_INIT_FUNC_TRACE();
360 ret = axgbe_dev_rx_mq_config(dev);
362 PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
365 ret = axgbe_phy_reset(pdata);
367 PMD_DRV_LOG(ERR, "phy reset failed\n");
370 ret = pdata->hw_if.init(pdata);
372 PMD_DRV_LOG(ERR, "dev_init failed\n");
376 /* enable uio/vfio intr/eventfd mapping */
377 rte_intr_enable(&pdata->pci_dev->intr_handle);
380 pdata->phy_if.phy_start(pdata);
381 axgbe_dev_enable_tx(dev);
382 axgbe_dev_enable_rx(dev);
384 rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
385 rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
386 if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
387 max_pkt_len > pdata->rx_buf_size)
388 dev_data->scattered_rx = 1;
390 /* Scatter Rx handling */
391 if (dev_data->scattered_rx)
392 dev->rx_pkt_burst = ð_axgbe_recv_scattered_pkts;
394 dev->rx_pkt_burst = &axgbe_recv_pkts;
399 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
401 axgbe_dev_stop(struct rte_eth_dev *dev)
403 struct axgbe_port *pdata = dev->data->dev_private;
405 PMD_INIT_FUNC_TRACE();
407 rte_intr_disable(&pdata->pci_dev->intr_handle);
409 if (rte_bit_relaxed_get32(AXGBE_STOPPED, &pdata->dev_state))
412 rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
413 axgbe_dev_disable_tx(dev);
414 axgbe_dev_disable_rx(dev);
416 pdata->phy_if.phy_stop(pdata);
417 pdata->hw_if.exit(pdata);
418 memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
419 rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
425 axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
427 struct axgbe_port *pdata = dev->data->dev_private;
429 PMD_INIT_FUNC_TRACE();
431 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
437 axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
439 struct axgbe_port *pdata = dev->data->dev_private;
441 PMD_INIT_FUNC_TRACE();
443 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
449 axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
451 struct axgbe_port *pdata = dev->data->dev_private;
453 PMD_INIT_FUNC_TRACE();
455 if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
457 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
463 axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
465 struct axgbe_port *pdata = dev->data->dev_private;
467 PMD_INIT_FUNC_TRACE();
469 if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
471 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
477 axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
479 struct axgbe_port *pdata = dev->data->dev_private;
481 /* Set Default MAC Addr */
482 axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);
488 axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
489 uint32_t index, uint32_t pool __rte_unused)
491 struct axgbe_port *pdata = dev->data->dev_private;
492 struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
494 if (index > hw_feat->addn_mac) {
495 PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
498 axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
503 axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
504 struct rte_eth_rss_reta_entry64 *reta_conf,
507 struct axgbe_port *pdata = dev->data->dev_private;
508 unsigned int i, idx, shift;
511 if (!pdata->rss_enable) {
512 PMD_DRV_LOG(ERR, "RSS not enabled\n");
516 if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
517 PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
521 for (i = 0; i < reta_size; i++) {
522 idx = i / RTE_RETA_GROUP_SIZE;
523 shift = i % RTE_RETA_GROUP_SIZE;
524 if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
526 pdata->rss_table[i] = reta_conf[idx].reta[shift];
529 /* Program the lookup table */
530 ret = axgbe_write_rss_lookup_table(pdata);
535 axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
536 struct rte_eth_rss_reta_entry64 *reta_conf,
539 struct axgbe_port *pdata = dev->data->dev_private;
540 unsigned int i, idx, shift;
542 if (!pdata->rss_enable) {
543 PMD_DRV_LOG(ERR, "RSS not enabled\n");
547 if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
548 PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
552 for (i = 0; i < reta_size; i++) {
553 idx = i / RTE_RETA_GROUP_SIZE;
554 shift = i % RTE_RETA_GROUP_SIZE;
555 if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
557 reta_conf[idx].reta[shift] = pdata->rss_table[i];
563 axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
564 struct rte_eth_rss_conf *rss_conf)
566 struct axgbe_port *pdata = dev->data->dev_private;
569 if (!pdata->rss_enable) {
570 PMD_DRV_LOG(ERR, "RSS not enabled\n");
574 if (rss_conf == NULL) {
575 PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
579 if (rss_conf->rss_key != NULL &&
580 rss_conf->rss_key_len == AXGBE_RSS_HASH_KEY_SIZE) {
581 rte_memcpy(pdata->rss_key, rss_conf->rss_key,
582 AXGBE_RSS_HASH_KEY_SIZE);
583 /* Program the hash key */
584 ret = axgbe_write_rss_hash_key(pdata);
589 pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
591 if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
592 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
594 (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
595 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
597 (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
598 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
600 /* Set the RSS options */
601 AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
607 axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
608 struct rte_eth_rss_conf *rss_conf)
610 struct axgbe_port *pdata = dev->data->dev_private;
612 if (!pdata->rss_enable) {
613 PMD_DRV_LOG(ERR, "RSS not enabled\n");
617 if (rss_conf == NULL) {
618 PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
622 if (rss_conf->rss_key != NULL &&
623 rss_conf->rss_key_len >= AXGBE_RSS_HASH_KEY_SIZE) {
624 rte_memcpy(rss_conf->rss_key, pdata->rss_key,
625 AXGBE_RSS_HASH_KEY_SIZE);
627 rss_conf->rss_key_len = AXGBE_RSS_HASH_KEY_SIZE;
628 rss_conf->rss_hf = pdata->rss_hf;
633 axgbe_dev_reset(struct rte_eth_dev *dev)
637 ret = axgbe_dev_close(dev);
641 ret = eth_axgbe_dev_init(dev);
647 axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
649 struct axgbe_port *pdata = dev->data->dev_private;
650 struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
652 if (index > hw_feat->addn_mac) {
653 PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
656 axgbe_set_mac_addn_addr(pdata, NULL, index);
660 axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
661 struct rte_ether_addr *mc_addr_set,
664 struct axgbe_port *pdata = dev->data->dev_private;
665 struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
666 uint32_t index = 1; /* 0 is always default mac */
669 if (nb_mc_addr > hw_feat->addn_mac) {
670 PMD_DRV_LOG(ERR, "Invalid number of multicast addresses %d\n", nb_mc_addr);
674 /* clear unicast addresses */
675 for (i = 1; i < hw_feat->addn_mac; i++) {
676 if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
678 memset(&dev->data->mac_addrs[i], 0,
679 sizeof(struct rte_ether_addr));
683 axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);
689 axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
690 struct rte_ether_addr *mac_addr, uint8_t add)
692 struct axgbe_port *pdata = dev->data->dev_private;
693 struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
695 if (!hw_feat->hash_table_size) {
696 PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
700 axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);
702 if (pdata->uc_hash_mac_addr > 0) {
703 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
704 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
706 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
707 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
713 axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
715 struct axgbe_port *pdata = dev->data->dev_private;
716 struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
719 if (!hw_feat->hash_table_size) {
720 PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
724 for (index = 0; index < pdata->hash_table_count; index++) {
726 pdata->uc_hash_table[index] = ~0;
728 pdata->uc_hash_table[index] = 0;
730 PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
731 add ? "set" : "clear", index);
733 AXGMAC_IOWRITE(pdata, MAC_HTR(index),
734 pdata->uc_hash_table[index]);
738 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
739 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
741 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
742 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
747 /* return 0 means link status changed, -1 means not changed */
749 axgbe_dev_link_update(struct rte_eth_dev *dev,
750 int wait_to_complete __rte_unused)
752 struct axgbe_port *pdata = dev->data->dev_private;
753 struct rte_eth_link link;
756 PMD_INIT_FUNC_TRACE();
759 pdata->phy_if.phy_status(pdata);
761 memset(&link, 0, sizeof(struct rte_eth_link));
762 link.link_duplex = pdata->phy.duplex;
763 link.link_status = pdata->phy_link;
764 link.link_speed = pdata->phy_speed;
765 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
766 ETH_LINK_SPEED_FIXED);
767 ret = rte_eth_linkstatus_set(dev, &link);
769 PMD_DRV_LOG(ERR, "No change in link status\n");
775 axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
777 struct axgbe_port *pdata = dev->data->dev_private;
779 if (regs->data == NULL) {
780 regs->length = axgbe_regs_get_count(pdata);
781 regs->width = sizeof(uint32_t);
785 /* Only full register dump is supported */
787 regs->length != (uint32_t)axgbe_regs_get_count(pdata))
790 regs->version = pdata->pci_dev->id.vendor_id << 16 |
791 pdata->pci_dev->id.device_id;
792 axgbe_regs_dump(pdata, regs->data);
795 static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
797 struct axgbe_mmc_stats *stats = &pdata->mmc_stats;
799 /* Freeze counters */
800 AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
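/* Each MMC counter is accumulated as a 64-bit value: add the 32-bit LO
 * register and the 32-bit HI register (shifted up by 32 bits) into the
 * running software copy.
 */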
803 stats->txoctetcount_gb +=
804 AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
805 stats->txoctetcount_gb +=
806 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);
808 stats->txframecount_gb +=
809 AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
810 stats->txframecount_gb +=
811 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);
813 stats->txbroadcastframes_g +=
814 AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
815 stats->txbroadcastframes_g +=
816 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);
818 stats->txmulticastframes_g +=
819 AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
820 stats->txmulticastframes_g +=
821 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);
823 stats->tx64octets_gb +=
824 AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
825 stats->tx64octets_gb +=
826 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);
828 stats->tx65to127octets_gb +=
829 AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
830 stats->tx65to127octets_gb +=
831 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);
833 stats->tx128to255octets_gb +=
834 AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
835 stats->tx128to255octets_gb +=
836 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);
838 stats->tx256to511octets_gb +=
839 AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
840 stats->tx256to511octets_gb +=
841 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);
843 stats->tx512to1023octets_gb +=
844 AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
845 stats->tx512to1023octets_gb +=
846 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);
848 stats->tx1024tomaxoctets_gb +=
849 AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
850 stats->tx1024tomaxoctets_gb +=
851 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);
853 stats->txunicastframes_gb +=
854 AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
855 stats->txunicastframes_gb +=
856 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);
858 stats->txmulticastframes_gb +=
859 AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
860 stats->txmulticastframes_gb +=
861 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);
863 stats->txbroadcastframes_g +=
864 AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
865 stats->txbroadcastframes_g +=
866 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);
868 stats->txunderflowerror +=
869 AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
870 stats->txunderflowerror +=
871 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);
873 stats->txoctetcount_g +=
874 AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
875 stats->txoctetcount_g +=
876 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);
878 stats->txframecount_g +=
879 AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
880 stats->txframecount_g +=
881 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);
883 stats->txpauseframes +=
884 AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
885 stats->txpauseframes +=
886 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);
888 stats->txvlanframes_g +=
889 AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
890 stats->txvlanframes_g +=
891 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);
894 stats->rxframecount_gb +=
895 AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
896 stats->rxframecount_gb +=
897 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);
899 stats->rxoctetcount_gb +=
900 AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
901 stats->rxoctetcount_gb +=
902 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);
904 stats->rxoctetcount_g +=
905 AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
906 stats->rxoctetcount_g +=
907 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);
909 stats->rxbroadcastframes_g +=
910 AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
911 stats->rxbroadcastframes_g +=
912 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);
914 stats->rxmulticastframes_g +=
915 AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
916 stats->rxmulticastframes_g +=
917 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);
920 AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
922 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);
924 stats->rxrunterror +=
925 AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
927 stats->rxjabbererror +=
928 AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
930 stats->rxundersize_g +=
931 AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
933 stats->rxoversize_g +=
934 AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
936 stats->rx64octets_gb +=
937 AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
938 stats->rx64octets_gb +=
939 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);
941 stats->rx65to127octets_gb +=
942 AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
943 stats->rx65to127octets_gb +=
944 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);
946 stats->rx128to255octets_gb +=
947 AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
948 stats->rx128to255octets_gb +=
949 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);
951 stats->rx256to511octets_gb +=
952 AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
953 stats->rx256to511octets_gb +=
954 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);
956 stats->rx512to1023octets_gb +=
957 AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
958 stats->rx512to1023octets_gb +=
959 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);
961 stats->rx1024tomaxoctets_gb +=
962 AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
963 stats->rx1024tomaxoctets_gb +=
964 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);
966 stats->rxunicastframes_g +=
967 AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
968 stats->rxunicastframes_g +=
969 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);
971 stats->rxlengtherror +=
972 AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
973 stats->rxlengtherror +=
974 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);
976 stats->rxoutofrangetype +=
977 AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
978 stats->rxoutofrangetype +=
979 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);
981 stats->rxpauseframes +=
982 AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
983 stats->rxpauseframes +=
984 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);
986 stats->rxfifooverflow +=
987 AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
988 stats->rxfifooverflow +=
989 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);
991 stats->rxvlanframes_gb +=
992 AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
993 stats->rxvlanframes_gb +=
994 ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);
996 stats->rxwatchdogerror +=
997 AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
999 /* Un-freeze counters */
1000 AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
1004 axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1007 struct axgbe_port *pdata = dev->data->dev_private;
1013 axgbe_read_mmc_stats(pdata);
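/* Each xstat value is fetched from the software MMC stats copy using the
 * byte offset recorded in axgbe_xstats_strings[].
 */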
1015 for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
1017 stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
1018 axgbe_xstats_strings[i].offset);
1025 axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1026 struct rte_eth_xstat_name *xstats_names,
1031 if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
1032 for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
1033 snprintf(xstats_names[i].name,
1034 RTE_ETH_XSTATS_NAME_SIZE, "%s",
1035 axgbe_xstats_strings[i].name);
1039 return AXGBE_XSTATS_COUNT;
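/* When ids is NULL, a full snapshot of all counters is returned; otherwise
 * the requested ids are copied out of a full snapshot obtained via a
 * recursive call with ids == NULL.
 */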
1043 axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1044 uint64_t *values, unsigned int n)
1047 uint64_t values_copy[AXGBE_XSTATS_COUNT];
1050 struct axgbe_port *pdata = dev->data->dev_private;
1052 if (n < AXGBE_XSTATS_COUNT)
1053 return AXGBE_XSTATS_COUNT;
1055 axgbe_read_mmc_stats(pdata);
1057 for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
1058 values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
1059 axgbe_xstats_strings[i].offset);
1065 axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);
1067 for (i = 0; i < n; i++) {
1068 if (ids[i] >= AXGBE_XSTATS_COUNT) {
1069 PMD_DRV_LOG(ERR, "id value isn't valid\n");
1072 values[i] = values_copy[ids[i]];
1078 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1079 const uint64_t *ids,
1080 struct rte_eth_xstat_name *xstats_names,
1083 struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
1087 return axgbe_dev_xstats_get_names(dev, xstats_names, size);
1089 axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);
1091 for (i = 0; i < size; i++) {
1092 if (ids[i] >= AXGBE_XSTATS_COUNT) {
1093 PMD_DRV_LOG(ERR, "id value isn't valid\n");
1096 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1102 axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
1104 struct axgbe_port *pdata = dev->data->dev_private;
1105 struct axgbe_mmc_stats *stats = &pdata->mmc_stats;
1107 /* MMC registers are configured for reset on read */
1108 axgbe_read_mmc_stats(pdata);
1111 memset(stats, 0, sizeof(*stats));
1117 axgbe_dev_stats_get(struct rte_eth_dev *dev,
1118 struct rte_eth_stats *stats)
1120 struct axgbe_rx_queue *rxq;
1121 struct axgbe_tx_queue *txq;
1122 struct axgbe_port *pdata = dev->data->dev_private;
1123 struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
1126 axgbe_read_mmc_stats(pdata);
1128 stats->imissed = mmc_stats->rxfifooverflow;
1130 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1131 rxq = dev->data->rx_queues[i];
1133 stats->q_ipackets[i] = rxq->pkts;
1134 stats->ipackets += rxq->pkts;
1135 stats->q_ibytes[i] = rxq->bytes;
1136 stats->ibytes += rxq->bytes;
1137 stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
1138 stats->q_errors[i] = rxq->errors
1139 + rxq->rx_mbuf_alloc_failed;
1140 stats->ierrors += rxq->errors;
1142 PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
1143 dev->data->port_id);
1147 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1148 txq = dev->data->tx_queues[i];
1150 stats->q_opackets[i] = txq->pkts;
1151 stats->opackets += txq->pkts;
1152 stats->q_obytes[i] = txq->bytes;
1153 stats->obytes += txq->bytes;
1154 stats->oerrors += txq->errors;
1156 PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
1157 dev->data->port_id);
1165 axgbe_dev_stats_reset(struct rte_eth_dev *dev)
1167 struct axgbe_rx_queue *rxq;
1168 struct axgbe_tx_queue *txq;
1171 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1172 rxq = dev->data->rx_queues[i];
1177 rxq->rx_mbuf_alloc_failed = 0;
1179 PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
1180 dev->data->port_id);
1183 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1184 txq = dev->data->tx_queues[i];
1190 PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
1191 dev->data->port_id);
1199 axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1201 struct axgbe_port *pdata = dev->data->dev_private;
1203 dev_info->max_rx_queues = pdata->rx_ring_count;
1204 dev_info->max_tx_queues = pdata->tx_ring_count;
1205 dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
1206 dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
1207 dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
1208 dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
1209 dev_info->speed_capa = ETH_LINK_SPEED_10G;
1211 dev_info->rx_offload_capa =
1212 DEV_RX_OFFLOAD_VLAN_STRIP |
1213 DEV_RX_OFFLOAD_VLAN_FILTER |
1214 DEV_RX_OFFLOAD_VLAN_EXTEND |
1215 DEV_RX_OFFLOAD_IPV4_CKSUM |
1216 DEV_RX_OFFLOAD_UDP_CKSUM |
1217 DEV_RX_OFFLOAD_TCP_CKSUM |
1218 DEV_RX_OFFLOAD_JUMBO_FRAME |
1219 DEV_RX_OFFLOAD_SCATTER |
1220 DEV_RX_OFFLOAD_KEEP_CRC;
1222 dev_info->tx_offload_capa =
1223 DEV_TX_OFFLOAD_VLAN_INSERT |
1224 DEV_TX_OFFLOAD_QINQ_INSERT |
1225 DEV_TX_OFFLOAD_IPV4_CKSUM |
1226 DEV_TX_OFFLOAD_UDP_CKSUM |
1227 DEV_TX_OFFLOAD_TCP_CKSUM;
1229 if (pdata->hw_feat.rss) {
1230 dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
1231 dev_info->reta_size = pdata->hw_feat.hash_table_size;
1232 dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
1235 dev_info->rx_desc_lim = rx_desc_lim;
1236 dev_info->tx_desc_lim = tx_desc_lim;
1238 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1239 .rx_free_thresh = AXGBE_RX_FREE_THRESH,
1242 dev_info->default_txconf = (struct rte_eth_txconf) {
1243 .tx_free_thresh = AXGBE_TX_FREE_THRESH,
1250 axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1252 struct axgbe_port *pdata = dev->data->dev_private;
1253 struct xgbe_fc_info fc = pdata->fc;
1254 unsigned int reg, reg_val = 0;
1257 reg_val = AXGMAC_IOREAD(pdata, reg);
1258 fc.low_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
1259 fc.high_water[0] = AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
1260 fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
1261 fc.autoneg = pdata->pause_autoneg;
1263 if (pdata->rx_pause && pdata->tx_pause)
1264 fc.mode = RTE_FC_FULL;
1265 else if (pdata->rx_pause)
1266 fc.mode = RTE_FC_RX_PAUSE;
1267 else if (pdata->tx_pause)
1268 fc.mode = RTE_FC_TX_PAUSE;
1270 fc.mode = RTE_FC_NONE;
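/* Convert the RFA/RFD queue fill thresholds from their register encoding
 * into kilobyte watermarks for fc_conf.
 */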
1272 fc_conf->high_water = (1024 + (fc.low_water[0] << 9)) / 1024;
1273 fc_conf->low_water = (1024 + (fc.high_water[0] << 9)) / 1024;
1274 fc_conf->pause_time = fc.pause_time[0];
1275 fc_conf->send_xon = fc.send_xon;
1276 fc_conf->mode = fc.mode;
1282 axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1284 struct axgbe_port *pdata = dev->data->dev_private;
1285 struct xgbe_fc_info fc = pdata->fc;
1286 unsigned int reg, reg_val = 0;
1289 pdata->pause_autoneg = fc_conf->autoneg;
1290 pdata->phy.pause_autoneg = pdata->pause_autoneg;
1291 fc.send_xon = fc_conf->send_xon;
1292 AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
1293 AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
1294 AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
1295 AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
1296 AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
1297 AXGMAC_IOWRITE(pdata, reg, reg_val);
1298 fc.mode = fc_conf->mode;
1300 if (fc.mode == RTE_FC_FULL) {
1301 pdata->tx_pause = 1;
1302 pdata->rx_pause = 1;
1303 } else if (fc.mode == RTE_FC_RX_PAUSE) {
1304 pdata->tx_pause = 0;
1305 pdata->rx_pause = 1;
1306 } else if (fc.mode == RTE_FC_TX_PAUSE) {
1307 pdata->tx_pause = 1;
1308 pdata->rx_pause = 0;
1310 pdata->tx_pause = 0;
1311 pdata->rx_pause = 0;
1314 if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
1315 pdata->hw_if.config_tx_flow_control(pdata);
1317 if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
1318 pdata->hw_if.config_rx_flow_control(pdata);
1320 pdata->hw_if.config_flow_control(pdata);
1321 pdata->phy.tx_pause = pdata->tx_pause;
1322 pdata->phy.rx_pause = pdata->rx_pause;
1328 axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
1329 struct rte_eth_pfc_conf *pfc_conf)
1331 struct axgbe_port *pdata = dev->data->dev_private;
1332 struct xgbe_fc_info fc = pdata->fc;
1335 tc_num = pdata->pfc_map[pfc_conf->priority];
1337 if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
1338 PMD_INIT_LOG(ERR, "Max supported traffic class: %d\n",
1339 pdata->hw_feat.tc_cnt);
1343 pdata->pause_autoneg = pfc_conf->fc.autoneg;
1344 pdata->phy.pause_autoneg = pdata->pause_autoneg;
1345 fc.send_xon = pfc_conf->fc.send_xon;
1346 AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
1347 AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
1348 AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
1349 AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));
1353 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1354 PSTC0, pfc_conf->fc.pause_time);
1357 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1358 PSTC1, pfc_conf->fc.pause_time);
1361 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1362 PSTC2, pfc_conf->fc.pause_time);
1365 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1366 PSTC3, pfc_conf->fc.pause_time);
1369 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1370 PSTC4, pfc_conf->fc.pause_time);
1373 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1374 PSTC5, pfc_conf->fc.pause_time);
1377 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1378 PSTC6, pfc_conf->fc.pause_time);
1381 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1382 PSTC7, pfc_conf->fc.pause_time);
1386 fc.mode = pfc_conf->fc.mode;
1388 if (fc.mode == RTE_FC_FULL) {
1389 pdata->tx_pause = 1;
1390 pdata->rx_pause = 1;
1391 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
1392 } else if (fc.mode == RTE_FC_RX_PAUSE) {
1393 pdata->tx_pause = 0;
1394 pdata->rx_pause = 1;
1395 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
1396 } else if (fc.mode == RTE_FC_TX_PAUSE) {
1397 pdata->tx_pause = 1;
1398 pdata->rx_pause = 0;
1399 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
1401 pdata->tx_pause = 0;
1402 pdata->rx_pause = 0;
1403 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
1406 if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
1407 pdata->hw_if.config_tx_flow_control(pdata);
1409 if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
1410 pdata->hw_if.config_rx_flow_control(pdata);
1411 pdata->hw_if.config_flow_control(pdata);
1412 pdata->phy.tx_pause = pdata->tx_pause;
1413 pdata->phy.rx_pause = pdata->rx_pause;
1419 axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1420 struct rte_eth_rxq_info *qinfo)
1422 struct axgbe_rx_queue *rxq;
1424 rxq = dev->data->rx_queues[queue_id];
1425 qinfo->mp = rxq->mb_pool;
1426 qinfo->scattered_rx = dev->data->scattered_rx;
1427 qinfo->nb_desc = rxq->nb_desc;
1428 qinfo->conf.rx_free_thresh = rxq->free_thresh;
1432 axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1433 struct rte_eth_txq_info *qinfo)
1435 struct axgbe_tx_queue *txq;
1437 txq = dev->data->tx_queues[queue_id];
1438 qinfo->nb_desc = txq->nb_desc;
1439 qinfo->conf.tx_free_thresh = txq->free_thresh;
1442 axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1444 static const uint32_t ptypes[] = {
1446 RTE_PTYPE_L2_ETHER_TIMESYNC,
1447 RTE_PTYPE_L2_ETHER_LLDP,
1448 RTE_PTYPE_L2_ETHER_ARP,
1449 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1450 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1453 RTE_PTYPE_L4_NONFRAG,
1457 RTE_PTYPE_TUNNEL_GRENAT,
1458 RTE_PTYPE_TUNNEL_IP,
1459 RTE_PTYPE_INNER_L2_ETHER,
1460 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1461 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1462 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1463 RTE_PTYPE_INNER_L4_FRAG,
1464 RTE_PTYPE_INNER_L4_ICMP,
1465 RTE_PTYPE_INNER_L4_NONFRAG,
1466 RTE_PTYPE_INNER_L4_SCTP,
1467 RTE_PTYPE_INNER_L4_TCP,
1468 RTE_PTYPE_INNER_L4_UDP,
1472 if (dev->rx_pkt_burst == axgbe_recv_pkts)
1477 static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1479 struct rte_eth_dev_info dev_info;
1480 struct axgbe_port *pdata = dev->data->dev_private;
1481 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1482 unsigned int val = 0;
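/* frame_size includes the Ethernet header and CRC on top of the MTU; the
 * jumbo frame offload (and the MAC JE bit) is enabled only when the
 * resulting frame exceeds the standard maximum frame length.
 */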
1483 axgbe_dev_info_get(dev, &dev_info);
1484 /* check that mtu is within the allowed range */
1485 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1487 /* MTU setting is not allowed while the port is started */
1488 if (dev->data->dev_started) {
1489 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1490 dev->data->port_id);
1493 if (frame_size > AXGBE_ETH_MAX_LEN) {
1494 dev->data->dev_conf.rxmode.offloads |=
1495 DEV_RX_OFFLOAD_JUMBO_FRAME;
1498 dev->data->dev_conf.rxmode.offloads &=
1499 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1502 AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
1503 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1508 axgbe_update_tstamp_time(struct axgbe_port *pdata,
1509 unsigned int sec, unsigned int nsec, int addsub)
1511 unsigned int count = 100;
1512 uint32_t sub_val = 0;
1513 uint32_t sub_val_sec = 0xFFFFFFFF;
1514 uint32_t sub_val_nsec = 0x3B9ACA00;
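/* 0x3B9ACA00 is 1,000,000,000 (one second in nanoseconds); together with
 * 0xFFFFFFFF it is used to build the complement values written to the
 * update registers when subtracting time (ADDSUB = 1).
 */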
1518 sub_val = sub_val_sec - (sec - 1);
1522 AXGMAC_IOWRITE(pdata, MAC_STSUR, sub_val);
1523 sub_val = sub_val_nsec - nsec;
1524 AXGMAC_IOWRITE(pdata, MAC_STNUR, sub_val);
1525 AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 1);
1527 AXGMAC_IOWRITE(pdata, MAC_STSUR, sec);
1528 AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 0);
1529 AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
1531 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1);
1532 /* Wait for time update to complete */
1533 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
1537 static inline uint64_t
1538 div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
1540 *remainder = dividend % divisor;
1541 return dividend / divisor;
1544 static inline uint64_t
1545 div_u64(uint64_t dividend, uint32_t divisor)
1548 return div_u64_rem(dividend, divisor, &remainder);
1552 axgbe_adjfreq(struct axgbe_port *pdata, int64_t delta)
1555 uint32_t addend, diff;
1556 unsigned int neg_adjust = 0;
1562 adjust = (uint64_t)pdata->tstamp_addend;
1564 diff = (uint32_t)div_u64(adjust, 1000000000UL);
1565 addend = (neg_adjust) ? pdata->tstamp_addend - diff :
1566 pdata->tstamp_addend + diff;
1567 pdata->tstamp_addend = addend;
1568 axgbe_update_tstamp_addend(pdata, addend);
1573 axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
1575 struct axgbe_port *pdata = dev->data->dev_private;
1576 struct timespec timestamp_delta;
1578 axgbe_adjfreq(pdata, delta);
1579 pdata->systime_tc.nsec += delta;
1583 timestamp_delta = rte_ns_to_timespec(delta);
1584 axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec,
1585 timestamp_delta.tv_nsec, 1);
1587 timestamp_delta = rte_ns_to_timespec(delta);
1588 axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec,
1589 timestamp_delta.tv_nsec, 0);
1595 axgbe_timesync_read_time(struct rte_eth_dev *dev,
1596 struct timespec *timestamp)
1599 struct axgbe_port *pdata = dev->data->dev_private;
1601 nsec = AXGMAC_IOREAD(pdata, MAC_STSR);
1602 nsec *= NSEC_PER_SEC;
1603 nsec += AXGMAC_IOREAD(pdata, MAC_STNR);
1604 *timestamp = rte_ns_to_timespec(nsec);
1608 axgbe_timesync_write_time(struct rte_eth_dev *dev,
1609 const struct timespec *timestamp)
1611 unsigned int count = 100;
1612 struct axgbe_port *pdata = dev->data->dev_private;
1614 AXGMAC_IOWRITE(pdata, MAC_STSUR, timestamp->tv_sec);
1615 AXGMAC_IOWRITE(pdata, MAC_STNUR, timestamp->tv_nsec);
1616 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1);
1617 /* Wait for time update to complete */
1618 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
1621 PMD_DRV_LOG(ERR, "Timed out updating timestamp\n");
1626 axgbe_update_tstamp_addend(struct axgbe_port *pdata,
1629 unsigned int count = 100;
1631 AXGMAC_IOWRITE(pdata, MAC_TSAR, addend);
1632 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
1634 /* Wait for addend update to complete */
1635 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
1638 PMD_DRV_LOG(ERR, "Timed out updating timestamp addend register\n");
1642 axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
1645 unsigned int count = 100;
1647 /* System Time Seconds Update */
1648 AXGMAC_IOWRITE(pdata, MAC_STSUR, sec);
1649 /* System Time Nanoseconds Update */
1650 AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
1651 /* Initialize Timestamp */
1652 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
1654 /* Wait for time update to complete */
1655 while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
1658 PMD_DRV_LOG(ERR, "Timed out initializing timestamp\n");
1662 axgbe_timesync_enable(struct rte_eth_dev *dev)
1664 struct axgbe_port *pdata = dev->data->dev_private;
1665 unsigned int mac_tscr = 0;
1667 struct timespec timestamp;
1670 /* Set one nano-second accuracy */
1671 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
1673 /* Set fine timestamp update */
1674 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
1676 /* Overwrite earlier timestamps */
1677 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
1679 AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
1681 /* Enable processing of PTP over Ethernet packets */
1682 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
1683 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
1684 /* Enable timestamping for all packets */
1685 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
1687 /* Enable timestamping */
1688 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
1689 AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
1691 /* Exit if timestamping is not enabled */
1692 if (!AXGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) {
1693 PMD_DRV_LOG(ERR, "Exiting as timestamp is not enabled\n");
1697 /* Sub-second Increment Value*/
1698 AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, AXGBE_TSTAMP_SSINC);
1699 /* Sub-nanosecond Increment Value */
1700 AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, AXGBE_TSTAMP_SNSINC);
1702 pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
1703 dividend = 50000000;
1705 pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
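/* The addend programmed into MAC_TSAR controls how far the internal
 * timestamp accumulator advances on every PTP clock cycle.
 */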
1707 axgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
1708 axgbe_set_tstamp_time(pdata, 0, 0);
1710 /* Initialize the timecounter */
1711 memset(&pdata->systime_tc, 0, sizeof(struct rte_timecounter));
1713 pdata->systime_tc.cc_mask = AXGBE_CYCLECOUNTER_MASK;
1714 pdata->systime_tc.cc_shift = 0;
1715 pdata->systime_tc.nsec_mask = 0;
1717 PMD_DRV_LOG(DEBUG, "Initializing system time counter with realtime\n");
1719 /* Updating the counter once with clock real time */
1720 clock_gettime(CLOCK_REALTIME, ×tamp);
1721 nsec = rte_timespec_to_ns(×tamp);
1722 nsec = rte_timecounter_update(&pdata->systime_tc, nsec);
1723 axgbe_set_tstamp_time(pdata, timestamp.tv_sec, timestamp.tv_nsec);
1728 axgbe_timesync_disable(struct rte_eth_dev *dev)
1730 struct axgbe_port *pdata = dev->data->dev_private;
1731 unsigned int mac_tscr = 0;
1733 /* Disable timestamping for all packets */
1734 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 0);
1735 /* Disable the addend register update */
1736 AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 0);
1737 /* Disable timestamp update */
1738 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 0);
1739 /* Disable timestamping */
1740 AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 0);
1745 axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
1746 struct timespec *timestamp, uint32_t flags)
1749 volatile union axgbe_rx_desc *desc;
1751 struct axgbe_rx_queue *rxq = *dev->data->rx_queues;
1753 idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
1754 desc = &rxq->desc[idx];
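/* The Rx timestamp is delivered in a context descriptor: desc1 holds the
 * seconds and desc0 the nanoseconds; an all-ones value marks an invalid
 * timestamp.
 */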
1756 while (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
1758 if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CTXT)) {
1759 if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_CONTEXT_DESC3, TSA) &&
1760 !AXGMAC_GET_BITS_LE(desc->write.desc3,
1761 RX_CONTEXT_DESC3, TSD)) {
1762 pmt = AXGMAC_GET_BITS_LE(desc->write.desc3,
1763 RX_CONTEXT_DESC3, PMT);
1764 nsec = rte_le_to_cpu_32(desc->write.desc1);
1765 nsec *= NSEC_PER_SEC;
1766 nsec += rte_le_to_cpu_32(desc->write.desc0);
1767 if (nsec != 0xffffffffffffffffULL) {
1769 *timestamp = rte_ns_to_timespec(nsec);
1771 "flags = 0x%x nsec = %"PRIu64"\n",
1781 axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
1782 struct timespec *timestamp)
1785 struct axgbe_port *pdata = dev->data->dev_private;
1786 unsigned int tx_snr, tx_ssr;
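/* Read the Tx timestamp snapshot registers; the read order depends on the
 * tx_tstamp_workaround flag so that a consistent seconds/nanoseconds pair
 * is captured.
 */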
1789 if (pdata->vdata->tx_tstamp_workaround) {
1790 tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR);
1791 tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR);
1794 tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR);
1795 tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR);
1797 if (AXGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) {
1798 PMD_DRV_LOG(DEBUG, "Waiting for TXTSSTSMIS\n");
1802 nsec *= NSEC_PER_SEC;
1804 PMD_DRV_LOG(DEBUG, "nsec = %"PRIu64" tx_ssr = %d tx_snr = %d\n",
1805 nsec, tx_ssr, tx_snr);
1806 *timestamp = rte_ns_to_timespec(nsec);
1811 axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
1813 struct axgbe_port *pdata = dev->data->dev_private;
1814 unsigned long vid_bit, vid_idx;
1816 vid_bit = VLAN_TABLE_BIT(vid);
1817 vid_idx = VLAN_TABLE_IDX(vid);
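/* Each VLAN ID maps to a single bit in the active_vlans bitmap:
 * VLAN_TABLE_IDX selects the word and VLAN_TABLE_BIT the bit within it.
 */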
1820 PMD_DRV_LOG(DEBUG, "Set VLAN vid=%d for device = %s\n",
1821 vid, pdata->eth_dev->device->name);
1822 pdata->active_vlans[vid_idx] |= vid_bit;
1824 PMD_DRV_LOG(DEBUG, "Reset VLAN vid=%d for device = %s\n",
1825 vid, pdata->eth_dev->device->name);
1826 pdata->active_vlans[vid_idx] &= ~vid_bit;
1828 pdata->hw_if.update_vlan_hash_table(pdata);
1833 axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
1834 enum rte_vlan_type vlan_type,
1837 struct axgbe_port *pdata = dev->data->dev_private;
1841 qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
1842 PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
1844 switch (vlan_type) {
1845 case ETH_VLAN_TYPE_INNER:
1846 PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_INNER\n");
1848 if (tpid != 0x8100 && tpid != 0x88a8)
1850 "tag supported 0x8100/0x88A8\n");
1851 PMD_DRV_LOG(DEBUG, "qinq with inner tag\n");
1853 /*Enable Inner VLAN Tag */
1854 AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 1);
1855 reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
1856 PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);
1860 "Inner type not supported in single tag\n");
1863 case ETH_VLAN_TYPE_OUTER:
1864 PMD_DRV_LOG(DEBUG, "ETH_VLAN_TYPE_OUTER\n");
1866 PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
1867 /*Enable outer VLAN tag*/
1868 AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 0);
1869 reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
1870 PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);
1872 AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 1);
1873 reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANIR, CSVL);
1874 PMD_DRV_LOG(DEBUG, "bit CSVL = 0x%x\n", reg);
1876 if (tpid != 0x8100 && tpid != 0x88a8)
1878 "tag supported 0x8100/0x88A8\n");
1881 case ETH_VLAN_TYPE_MAX:
1882 PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_MAX\n");
1884 case ETH_VLAN_TYPE_UNKNOWN:
1885 PMD_DRV_LOG(ERR, "ETH_VLAN_TYPE_UNKNOWN\n");
1891 static void axgbe_vlan_extend_enable(struct axgbe_port *pdata)
1895 AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 1);
1896 qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
1897 PMD_DRV_LOG(DEBUG, "vlan double tag enabled EDVLP:qinq=0x%x\n", qinq);
1900 static void axgbe_vlan_extend_disable(struct axgbe_port *pdata)
1904 AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 0);
1905 qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
1906 PMD_DRV_LOG(DEBUG, "vlan double tag disable EDVLP:qinq=0x%x\n", qinq);
1910 axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1912 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1913 struct axgbe_port *pdata = dev->data->dev_private;
1915 /* Indicate that VLAN Tx CTAGs come from context descriptors */
1916 AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
1917 AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
1919 if (mask & ETH_VLAN_STRIP_MASK) {
1920 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
1921 PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
1922 pdata->eth_dev->device->name);
1923 pdata->hw_if.enable_rx_vlan_stripping(pdata);
1925 PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
1926 pdata->eth_dev->device->name);
1927 pdata->hw_if.disable_rx_vlan_stripping(pdata);
1930 if (mask & ETH_VLAN_FILTER_MASK) {
1931 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
1932 PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
1933 pdata->eth_dev->device->name);
1934 pdata->hw_if.enable_rx_vlan_filtering(pdata);
1936 PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
1937 pdata->eth_dev->device->name);
1938 pdata->hw_if.disable_rx_vlan_filtering(pdata);
1941 if (mask & ETH_VLAN_EXTEND_MASK) {
1942 if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
1943 PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
1944 axgbe_vlan_extend_enable(pdata);
1945 /* Set global registers with default ethertype*/
1946 axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
1947 RTE_ETHER_TYPE_VLAN);
1948 axgbe_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER,
1949 RTE_ETHER_TYPE_VLAN);
1951 PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
1952 axgbe_vlan_extend_disable(pdata);
1958 static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
1960 unsigned int mac_hfr0, mac_hfr1, mac_hfr2, mac_hfr3;
1961 struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
1963 mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
1964 mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
1965 mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
1966 mac_hfr3 = AXGMAC_IOREAD(pdata, MAC_HWF3R);
1968 memset(hw_feat, 0, sizeof(*hw_feat));
1970 hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);
1972 /* Hardware feature register 0 */
1973 hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
1974 hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
1975 hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
1976 hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
1977 hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
1978 hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
1979 hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
1980 hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
1981 hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
1982 hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
1983 hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
1984 hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
1986 hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
1987 hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Hardware feature register 3 */
	hw_feat->tx_q_vlan_tag_ins = AXGMAC_GET_BITS(mac_hfr3,
						     MAC_HWF3R, CBTISEL);
	hw_feat->no_of_vlan_extn = AXGMAC_GET_BITS(mac_hfr3,
						   MAC_HWF3R, NRVF);
	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers: the field encodes
	 * log2(bytes) - 7, so e.g. an encoded 7 gives 1 << (7 + 7) = 16384.
	 */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

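/*
 * Wire up the device, PHY and I2C function pointer tables; the PHY
 * implementation hooks come from the version-specific driver data.
 */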
static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

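/*
 * Derive the Tx/Rx ring and queue counts from the hardware feature
 * values, clamped to any caller-provided maximums.
 */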
static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

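/*
 * Baseline DMA/MTL settings applied during device init: x8 PBL,
 * store-and-forward Tx/Rx paths, pause autonegotiation enabled with
 * Tx/Rx pause initially off.
 */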
static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

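/*
 * Check whether a PCI device with the given AMD device ID is present on
 * the bus; the comparison callback follows the find_device() convention
 * of returning 0 on a match.
 */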
static int
pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
{
	const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
	const struct rte_pci_id *pcid = _pci_id;

	if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
	    pdev->id.device_id == pcid->device_id)
		return 0;
	return 1;
}

static bool
pci_search_device(int device_id)
{
	struct rte_bus *pci_bus;
	struct rte_pci_id dev_id;

	dev_id.device_id = device_id;
	pci_bus = rte_bus_find_by_name("pci");
	return (pci_bus != NULL) &&
		(pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
}

/*
 * Initialise an axgbe port. It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	uint32_t len;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;

	eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status;
	eth_dev->tx_descriptor_status = axgbe_dev_tx_descriptor_status;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/*
	 * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
	 */
	if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* Version-specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
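	/*
	 * The SIZE field encodes log2(window bytes) - 7: e.g. a value of 2
	 * yields a 512-byte window (1 << (2 + 7)) and a mask of 0x1ff.
	 */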
	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
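	/* The low register holds MAC bytes 0-3, the high register bytes 4-5 */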
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);

	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}
	/* Allocate memory for storing hash filter MAC addresses */
	len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
	eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
						    len, 0);

	if (eth_dev->data->hash_mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to "
			     "store MAC addresses", len);
		return -ENOMEM;
	}

	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		eth_dev->data->mac_addrs = NULL;
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

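/*
 * Close the port: clear the queues and detach the interrupt handler.
 * Only the primary process performs the teardown.
 */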
static int
axgbe_dev_close(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	axgbe_dev_clear_queues(eth_dev);

	/* Disable UIO interrupt before unregistering the callback */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

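/*
 * PCI probe/remove glue: allocate an axgbe_port sized private data area
 * per port and run the per-port init/close hooks above.
 */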
static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, axgbe_dev_close);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER_SUFFIX(axgbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(axgbe_logtype_driver, driver, NOTICE);