net/axgbe: simplify rate change mailbox interface
[dpdk.git] / drivers / net / axgbe / axgbe_ethdev.c
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3  *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
4  */
5
6 #include "axgbe_rxtx.h"
7 #include "axgbe_ethdev.h"
8 #include "axgbe_common.h"
9 #include "axgbe_phy.h"
10 #include "axgbe_regs.h"
11 #include "rte_time.h"
12
13 #include "eal_filesystem.h"
14
15 static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
16 static int  axgbe_dev_configure(struct rte_eth_dev *dev);
17 static int  axgbe_dev_start(struct rte_eth_dev *dev);
18 static int  axgbe_dev_stop(struct rte_eth_dev *dev);
19 static void axgbe_dev_interrupt_handler(void *param);
20 static int axgbe_dev_close(struct rte_eth_dev *dev);
21 static int axgbe_dev_reset(struct rte_eth_dev *dev);
22 static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
23 static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
24 static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
25 static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
26 static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
27                                   struct rte_ether_addr *mac_addr);
28 static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
29                                   struct rte_ether_addr *mac_addr,
30                                   uint32_t index,
31                                   uint32_t vmdq);
32 static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
33 static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
34                                       struct rte_ether_addr *mc_addr_set,
35                                       uint32_t nb_mc_addr);
36 static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
37                                        struct rte_ether_addr *mac_addr,
38                                        uint8_t add);
39 static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
40                                            uint8_t add);
41 static int axgbe_dev_link_update(struct rte_eth_dev *dev,
42                                  int wait_to_complete);
43 static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
44                               struct rte_dev_reg_info *regs);
45 static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
46                                 struct rte_eth_stats *stats);
47 static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
48 static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
49                                 struct rte_eth_xstat *stats,
50                                 unsigned int n);
51 static int
52 axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
53                            struct rte_eth_xstat_name *xstats_names,
54                            unsigned int size);
55 static int
56 axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
57                            const uint64_t *ids,
58                            uint64_t *values,
59                            unsigned int n);
60 static int
61 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
62                                  const uint64_t *ids,
63                                  struct rte_eth_xstat_name *xstats_names,
64                                  unsigned int size);
65 static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
66 static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
67                           struct rte_eth_rss_reta_entry64 *reta_conf,
68                           uint16_t reta_size);
69 static int axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
70                          struct rte_eth_rss_reta_entry64 *reta_conf,
71                          uint16_t reta_size);
72 static int axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
73                                      struct rte_eth_rss_conf *rss_conf);
74 static int axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
75                                        struct rte_eth_rss_conf *rss_conf);
76 static int  axgbe_dev_info_get(struct rte_eth_dev *dev,
77                                struct rte_eth_dev_info *dev_info);
78 static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
79                                 struct rte_eth_fc_conf *fc_conf);
80 static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
81                                 struct rte_eth_fc_conf *fc_conf);
82 static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
83                                 struct rte_eth_pfc_conf *pfc_conf);
84 static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
85         struct rte_eth_rxq_info *qinfo);
86 static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
87         struct rte_eth_txq_info *qinfo);
88 const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
89 static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
90
91 static int
92 axgbe_timesync_enable(struct rte_eth_dev *dev);
93 static int
94 axgbe_timesync_disable(struct rte_eth_dev *dev);
95 static int
96 axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
97                         struct timespec *timestamp, uint32_t flags);
98 static int
99 axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
100                         struct timespec *timestamp);
101 static int
102 axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta);
103 static int
104 axgbe_timesync_read_time(struct rte_eth_dev *dev,
105                         struct timespec *timestamp);
106 static int
107 axgbe_timesync_write_time(struct rte_eth_dev *dev,
108                         const struct timespec *timestamp);
109 static void
110 axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
111                         unsigned int nsec);
112 static void
113 axgbe_update_tstamp_addend(struct axgbe_port *pdata,
114                         unsigned int addend);
115 static int
116         axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on);
117 static int axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
118                                 enum rte_vlan_type vlan_type, uint16_t tpid);
119 static int axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
120
/* Maps an extended-statistic display name to the byte offset of the
 * matching counter inside struct axgbe_mmc_stats.
 */
struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	int offset;
};
125
/* Build one axgbe_xstats table entry from a display string and the
 * corresponding struct axgbe_mmc_stats member name.
 */
#define AXGMAC_MMC_STAT(_string, _var)                           \
	{ _string,                                              \
	  offsetof(struct axgbe_mmc_stats, _var),       \
	}
130
/* Table of all extended statistics exported through the xstats API;
 * each entry points at one MMC hardware counter mirror.
 */
static const struct axgbe_xstats axgbe_xstats_strings[] = {
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};
170
#define AXGBE_XSTATS_COUNT        ARRAY_SIZE(axgbe_xstats_strings)

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID       0x1022
#define AMD_PCI_RV_ROOT_COMPLEX_ID      0x15d0
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

/* PCI vendor/device ID pairs probed by this PMD; zero entry terminates */
static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};
184
/* Hardware capabilities/quirks for the V2A device (larger FIFOs) */
static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access                    = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit                      = 1,
	.tx_max_fifo_size               = 229376,
	.rx_max_fifo_size               = 229376,
	.tx_tstamp_workaround           = 1,
	.ecc_support                    = 1,
	.i2c_support                    = 1,
	.an_cdr_workaround              = 1,
};
196
/* Hardware capabilities/quirks for the V2B device (64 KiB FIFOs) */
static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access                    = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit                      = 1,
	.tx_max_fifo_size               = 65536,
	.rx_max_fifo_size               = 65536,
	.tx_tstamp_workaround           = 1,
	.ecc_support                    = 1,
	.i2c_support                    = 1,
	.an_cdr_workaround              = 1,
};
208
/* Rx ring-size limits reported via dev_infos_get; rings align to 8 */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

/* Tx ring-size limits reported via dev_infos_get; rings align to 8 */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};
220
/* ethdev callback table: wires the generic rte_ethdev API onto the
 * axgbe implementations defined in this file and in axgbe_rxtx.c.
 */
static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.dev_reset            = axgbe_dev_reset,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set         = axgbe_dev_mac_addr_set,
	.mac_addr_add         = axgbe_dev_mac_addr_add,
	.mac_addr_remove      = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list     = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set    = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update          = axgbe_dev_link_update,
	.get_reg              = axgbe_dev_get_regs,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.xstats_get           = axgbe_dev_xstats_get,
	.xstats_reset         = axgbe_dev_xstats_reset,
	.xstats_get_names     = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
	.reta_update          = axgbe_dev_rss_reta_update,
	.reta_query           = axgbe_dev_rss_reta_query,
	.rss_hash_update      = axgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = axgbe_dev_rss_hash_conf_get,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
	.flow_ctrl_get        = axgbe_flow_ctrl_get,
	.flow_ctrl_set        = axgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
	.rxq_info_get                 = axgbe_rxq_info_get,
	.txq_info_get                 = axgbe_txq_info_get,
	.dev_supported_ptypes_get     = axgbe_dev_supported_ptypes_get,
	.mtu_set                = axgb_mtu_set,
	.vlan_filter_set      = axgbe_vlan_filter_set,
	.vlan_tpid_set        = axgbe_vlan_tpid_set,
	.vlan_offload_set     = axgbe_vlan_offload_set,
	.timesync_enable              = axgbe_timesync_enable,
	.timesync_disable             = axgbe_timesync_disable,
	.timesync_read_rx_timestamp   = axgbe_timesync_read_rx_timestamp,
	.timesync_read_tx_timestamp   = axgbe_timesync_read_tx_timestamp,
	.timesync_adjust_time         = axgbe_timesync_adjust_time,
	.timesync_read_time           = axgbe_timesync_read_time,
	.timesync_write_time          = axgbe_timesync_write_time,
	.fw_version_get                 = axgbe_dev_fw_version_get,
};
274
275 static int axgbe_phy_reset(struct axgbe_port *pdata)
276 {
277         pdata->phy_link = -1;
278         pdata->phy_speed = SPEED_UNKNOWN;
279         return pdata->phy_if.phy_reset(pdata);
280 }
281
282 /*
283  * Interrupt handler triggered by NIC  for handling
284  * specific interrupt.
285  *
286  * @param handle
287  *  Pointer to interrupt handle.
288  * @param param
289  *  The address of parameter (struct rte_eth_dev *) registered before.
290  *
291  * @return
292  *  void
293  */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	/* Let the PHY layer service any auto-negotiation interrupt first */
	pdata->phy_if.an_isr(pdata);
	/*DMA related interrupts*/
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		if (dma_isr & 1) {
			/* Bit 0 == DMA channel 0: read its status and
			 * write the value back to acknowledge/clear it.
			 */
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(pdata->pci_dev->intr_handle);
}
320
321 /*
322  * Configure device link speed and setup link.
323  * It returns 0 on success.
324  */
325 static int
326 axgbe_dev_configure(struct rte_eth_dev *dev)
327 {
328         struct axgbe_port *pdata =  dev->data->dev_private;
329         /* Checksum offload to hardware */
330         pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
331                                 RTE_ETH_RX_OFFLOAD_CHECKSUM;
332         return 0;
333 }
334
335 static int
336 axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
337 {
338         struct axgbe_port *pdata = dev->data->dev_private;
339
340         if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
341                 pdata->rss_enable = 1;
342         else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
343                 pdata->rss_enable = 0;
344         else
345                 return  -1;
346         return 0;
347 }
348
/*
 * Start the device: configure multiqueue/RSS, reset the PHY,
 * initialize the MAC/DMA hardware, enable interrupts and the Tx/Rx
 * paths, then select the Rx burst function (scattered vs. single-mbuf).
 * Steps must run in this order; each failure aborts the start.
 */
static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(pdata->pci_dev->intr_handle);

	/* phy start*/
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	/* Mark the port as up and running */
	rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);

	/* Frames larger than one Rx buffer (or an explicit SCATTER
	 * offload request) need the scattered receive path.
	 */
	max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
	if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
				max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/*  Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}
402
/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static int
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Interrupts are disabled unconditionally, even if already stopped */
	rte_intr_disable(pdata->pci_dev->intr_handle);

	/* Idempotent: a second stop is a no-op */
	if (rte_bit_relaxed_get32(AXGBE_STOPPED, &pdata->dev_state))
		return 0;

	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	/* Stop the PHY, then tear down the MAC/DMA hardware state */
	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);

	return 0;
}
427
428 static int
429 axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
430 {
431         struct axgbe_port *pdata = dev->data->dev_private;
432
433         PMD_INIT_FUNC_TRACE();
434
435         AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
436
437         return 0;
438 }
439
440 static int
441 axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
442 {
443         struct axgbe_port *pdata = dev->data->dev_private;
444
445         PMD_INIT_FUNC_TRACE();
446
447         AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
448
449         return 0;
450 }
451
452 static int
453 axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
454 {
455         struct axgbe_port *pdata = dev->data->dev_private;
456
457         PMD_INIT_FUNC_TRACE();
458
459         if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
460                 return 0;
461         AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
462
463         return 0;
464 }
465
466 static int
467 axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
468 {
469         struct axgbe_port *pdata = dev->data->dev_private;
470
471         PMD_INIT_FUNC_TRACE();
472
473         if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
474                 return 0;
475         AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
476
477         return 0;
478 }
479
480 static int
481 axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
482 {
483         struct axgbe_port *pdata = dev->data->dev_private;
484
485         /* Set Default MAC Addr */
486         axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);
487
488         return 0;
489 }
490
491 static int
492 axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
493                               uint32_t index, uint32_t pool __rte_unused)
494 {
495         struct axgbe_port *pdata = dev->data->dev_private;
496         struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
497
498         if (index > hw_feat->addn_mac) {
499                 PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
500                 return -EINVAL;
501         }
502         axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
503         return 0;
504 }
505
506 static int
507 axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
508                           struct rte_eth_rss_reta_entry64 *reta_conf,
509                           uint16_t reta_size)
510 {
511         struct axgbe_port *pdata = dev->data->dev_private;
512         unsigned int i, idx, shift;
513         int ret;
514
515         if (!pdata->rss_enable) {
516                 PMD_DRV_LOG(ERR, "RSS not enabled\n");
517                 return -ENOTSUP;
518         }
519
520         if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
521                 PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
522                 return -EINVAL;
523         }
524
525         for (i = 0; i < reta_size; i++) {
526                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
527                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
528                 if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
529                         continue;
530                 pdata->rss_table[i] = reta_conf[idx].reta[shift];
531         }
532
533         /* Program the lookup table */
534         ret = axgbe_write_rss_lookup_table(pdata);
535         return ret;
536 }
537
538 static int
539 axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
540                          struct rte_eth_rss_reta_entry64 *reta_conf,
541                          uint16_t reta_size)
542 {
543         struct axgbe_port *pdata = dev->data->dev_private;
544         unsigned int i, idx, shift;
545
546         if (!pdata->rss_enable) {
547                 PMD_DRV_LOG(ERR, "RSS not enabled\n");
548                 return -ENOTSUP;
549         }
550
551         if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
552                 PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
553                 return -EINVAL;
554         }
555
556         for (i = 0; i < reta_size; i++) {
557                 idx = i / RTE_ETH_RETA_GROUP_SIZE;
558                 shift = i % RTE_ETH_RETA_GROUP_SIZE;
559                 if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
560                         continue;
561                 reta_conf[idx].reta[shift] = pdata->rss_table[i];
562         }
563         return 0;
564 }
565
/*
 * Update the RSS hash key and/or enabled hash types.
 * A supplied key is applied only when its length matches the hardware
 * key size exactly; an ill-sized key is silently ignored.
 */
static int
axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;

	if (!pdata->rss_enable) {
		PMD_DRV_LOG(ERR, "RSS not enabled\n");
		return -ENOTSUP;
	}

	if (rss_conf == NULL) {
		PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
		return -EINVAL;
	}

	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len == AXGBE_RSS_HASH_KEY_SIZE) {
		rte_memcpy(pdata->rss_key, rss_conf->rss_key,
		       AXGBE_RSS_HASH_KEY_SIZE);
		/* Program the hash key */
		ret = axgbe_write_rss_hash_key(pdata);
		if (ret != 0)
			return ret;
	}

	/* Keep only the hash types this hardware supports */
	pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;

	/* Accumulate per-type enable bits into the cached options word */
	if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	if (pdata->rss_hf &
	    (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	if (pdata->rss_hf &
	    (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
		AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Set the RSS options */
	AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	return 0;
}
609
610 static int
611 axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
612                             struct rte_eth_rss_conf *rss_conf)
613 {
614         struct axgbe_port *pdata = dev->data->dev_private;
615
616         if (!pdata->rss_enable) {
617                 PMD_DRV_LOG(ERR, "RSS not enabled\n");
618                 return -ENOTSUP;
619         }
620
621         if (rss_conf == NULL) {
622                 PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
623                 return -EINVAL;
624         }
625
626         if (rss_conf->rss_key != NULL &&
627             rss_conf->rss_key_len >= AXGBE_RSS_HASH_KEY_SIZE) {
628                 rte_memcpy(rss_conf->rss_key, pdata->rss_key,
629                        AXGBE_RSS_HASH_KEY_SIZE);
630         }
631         rss_conf->rss_key_len = AXGBE_RSS_HASH_KEY_SIZE;
632         rss_conf->rss_hf = pdata->rss_hf;
633         return 0;
634 }
635
/* Reset the port: a full close followed by a fresh device init. */
static int
axgbe_dev_reset(struct rte_eth_dev *dev)
{
	int ret;

	ret = axgbe_dev_close(dev);
	if (ret == 0)
		ret = eth_axgbe_dev_init(dev);

	return ret;
}
649
650 static void
651 axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
652 {
653         struct axgbe_port *pdata = dev->data->dev_private;
654         struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
655
656         if (index > hw_feat->addn_mac) {
657                 PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
658                 return;
659         }
660         axgbe_set_mac_addn_addr(pdata, NULL, index);
661 }
662
663 static int
664 axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
665                                       struct rte_ether_addr *mc_addr_set,
666                                       uint32_t nb_mc_addr)
667 {
668         struct axgbe_port *pdata = dev->data->dev_private;
669         struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
670         uint32_t index = 1; /* 0 is always default mac */
671         uint32_t i;
672
673         if (nb_mc_addr > hw_feat->addn_mac) {
674                 PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
675                 return -EINVAL;
676         }
677
678         /* clear unicast addresses */
679         for (i = 1; i < hw_feat->addn_mac; i++) {
680                 if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
681                         continue;
682                 memset(&dev->data->mac_addrs[i], 0,
683                        sizeof(struct rte_ether_addr));
684         }
685
686         while (nb_mc_addr--)
687                 axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);
688
689         return 0;
690 }
691
692 static int
693 axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
694                             struct rte_ether_addr *mac_addr, uint8_t add)
695 {
696         struct axgbe_port *pdata = dev->data->dev_private;
697         struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
698
699         if (!hw_feat->hash_table_size) {
700                 PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
701                 return -ENOTSUP;
702         }
703
704         axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);
705
706         if (pdata->uc_hash_mac_addr > 0) {
707                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
708                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
709         } else {
710                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
711                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
712         }
713         return 0;
714 }
715
716 static int
717 axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
718 {
719         struct axgbe_port *pdata = dev->data->dev_private;
720         struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
721         uint32_t index;
722
723         if (!hw_feat->hash_table_size) {
724                 PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
725                 return -ENOTSUP;
726         }
727
728         for (index = 0; index < pdata->hash_table_count; index++) {
729                 if (add)
730                         pdata->uc_hash_table[index] = ~0;
731                 else
732                         pdata->uc_hash_table[index] = 0;
733
734                 PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
735                             add ? "set" : "clear", index);
736
737                 AXGMAC_IOWRITE(pdata, MAC_HTR(index),
738                                pdata->uc_hash_table[index]);
739         }
740
741         if (add) {
742                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
743                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
744         } else {
745                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
746                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
747         }
748         return 0;
749 }
750
751 /* return 0 means link status changed, -1 means not changed */
752 static int
753 axgbe_dev_link_update(struct rte_eth_dev *dev,
754                       int wait_to_complete __rte_unused)
755 {
756         struct axgbe_port *pdata = dev->data->dev_private;
757         struct rte_eth_link link;
758         int ret = 0;
759
760         PMD_INIT_FUNC_TRACE();
761         rte_delay_ms(800);
762
763         pdata->phy_if.phy_status(pdata);
764
765         memset(&link, 0, sizeof(struct rte_eth_link));
766         link.link_duplex = pdata->phy.duplex;
767         link.link_status = pdata->phy_link;
768         link.link_speed = pdata->phy_speed;
769         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
770                               RTE_ETH_LINK_SPEED_FIXED);
771         ret = rte_eth_linkstatus_set(dev, &link);
772         if (ret == -1)
773                 PMD_DRV_LOG(ERR, "No change in link status\n");
774
775         return ret;
776 }
777
778 static int
779 axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
780 {
781         struct axgbe_port *pdata = dev->data->dev_private;
782
783         if (regs->data == NULL) {
784                 regs->length = axgbe_regs_get_count(pdata);
785                 regs->width = sizeof(uint32_t);
786                 return 0;
787         }
788
789         /* Only full register dump is supported */
790         if (regs->length &&
791             regs->length != (uint32_t)axgbe_regs_get_count(pdata))
792                 return -ENOTSUP;
793
794         regs->version = pdata->pci_dev->id.vendor_id << 16 |
795                         pdata->pci_dev->id.device_id;
796         axgbe_regs_dump(pdata, regs->data);
797         return 0;
798 }
799 static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
800 {
801         struct axgbe_mmc_stats *stats = &pdata->mmc_stats;
802
803         /* Freeze counters */
804         AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
805
806         /* Tx counters */
807         stats->txoctetcount_gb +=
808                 AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
809         stats->txoctetcount_gb +=
810         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);
811
812         stats->txframecount_gb +=
813                 AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
814         stats->txframecount_gb +=
815         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);
816
817         stats->txbroadcastframes_g +=
818                 AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
819         stats->txbroadcastframes_g +=
820         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);
821
822         stats->txmulticastframes_g +=
823                 AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
824         stats->txmulticastframes_g +=
825         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);
826
827         stats->tx64octets_gb +=
828                 AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
829         stats->tx64octets_gb +=
830         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);
831
832         stats->tx65to127octets_gb +=
833                 AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
834         stats->tx65to127octets_gb +=
835         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);
836
837         stats->tx128to255octets_gb +=
838                 AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
839         stats->tx128to255octets_gb +=
840         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);
841
842         stats->tx256to511octets_gb +=
843                 AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
844         stats->tx256to511octets_gb +=
845         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);
846
847         stats->tx512to1023octets_gb +=
848                 AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
849         stats->tx512to1023octets_gb +=
850         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);
851
852         stats->tx1024tomaxoctets_gb +=
853                 AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
854         stats->tx1024tomaxoctets_gb +=
855         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);
856
857         stats->txunicastframes_gb +=
858                 AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
859         stats->txunicastframes_gb +=
860         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);
861
862         stats->txmulticastframes_gb +=
863                 AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
864         stats->txmulticastframes_gb +=
865         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);
866
867         stats->txbroadcastframes_g +=
868                 AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
869         stats->txbroadcastframes_g +=
870         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);
871
872         stats->txunderflowerror +=
873                 AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
874         stats->txunderflowerror +=
875         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);
876
877         stats->txoctetcount_g +=
878                 AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
879         stats->txoctetcount_g +=
880         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);
881
882         stats->txframecount_g +=
883                 AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
884         stats->txframecount_g +=
885         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);
886
887         stats->txpauseframes +=
888                 AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
889         stats->txpauseframes +=
890         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);
891
892         stats->txvlanframes_g +=
893                 AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
894         stats->txvlanframes_g +=
895         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);
896
897         /* Rx counters */
898         stats->rxframecount_gb +=
899                 AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
900         stats->rxframecount_gb +=
901         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);
902
903         stats->rxoctetcount_gb +=
904                 AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
905         stats->rxoctetcount_gb +=
906         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);
907
908         stats->rxoctetcount_g +=
909                 AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
910         stats->rxoctetcount_g +=
911         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);
912
913         stats->rxbroadcastframes_g +=
914                 AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
915         stats->rxbroadcastframes_g +=
916         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);
917
918         stats->rxmulticastframes_g +=
919                 AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
920         stats->rxmulticastframes_g +=
921         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);
922
923         stats->rxcrcerror +=
924                 AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
925         stats->rxcrcerror +=
926         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);
927
928         stats->rxrunterror +=
929                 AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
930
931         stats->rxjabbererror +=
932                 AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
933
934         stats->rxundersize_g +=
935                 AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
936
937         stats->rxoversize_g +=
938                 AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
939
940         stats->rx64octets_gb +=
941                 AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
942         stats->rx64octets_gb +=
943         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);
944
945         stats->rx65to127octets_gb +=
946                 AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
947         stats->rx65to127octets_gb +=
948         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);
949
950         stats->rx128to255octets_gb +=
951                 AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
952         stats->rx128to255octets_gb +=
953         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);
954
955         stats->rx256to511octets_gb +=
956                 AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
957         stats->rx256to511octets_gb +=
958         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);
959
960         stats->rx512to1023octets_gb +=
961                 AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
962         stats->rx512to1023octets_gb +=
963         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);
964
965         stats->rx1024tomaxoctets_gb +=
966                 AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
967         stats->rx1024tomaxoctets_gb +=
968         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);
969
970         stats->rxunicastframes_g +=
971                 AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
972         stats->rxunicastframes_g +=
973         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);
974
975         stats->rxlengtherror +=
976                 AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
977         stats->rxlengtherror +=
978         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);
979
980         stats->rxoutofrangetype +=
981                 AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
982         stats->rxoutofrangetype +=
983         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);
984
985         stats->rxpauseframes +=
986                 AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
987         stats->rxpauseframes +=
988         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);
989
990         stats->rxfifooverflow +=
991                 AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
992         stats->rxfifooverflow +=
993         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);
994
995         stats->rxvlanframes_gb +=
996                 AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
997         stats->rxvlanframes_gb +=
998         ((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);
999
1000         stats->rxwatchdogerror +=
1001                 AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
1002
1003         /* Un-freeze counters */
1004         AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
1005 }
1006
1007 static int
1008 axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1009                      unsigned int n)
1010 {
1011         struct axgbe_port *pdata = dev->data->dev_private;
1012         unsigned int i;
1013
1014         if (!stats)
1015                 return 0;
1016
1017         axgbe_read_mmc_stats(pdata);
1018
1019         for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
1020                 stats[i].id = i;
1021                 stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
1022                                 axgbe_xstats_strings[i].offset);
1023         }
1024
1025         return i;
1026 }
1027
1028 static int
1029 axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1030                            struct rte_eth_xstat_name *xstats_names,
1031                            unsigned int n)
1032 {
1033         unsigned int i;
1034
1035         if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
1036                 for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
1037                         snprintf(xstats_names[i].name,
1038                                  RTE_ETH_XSTATS_NAME_SIZE, "%s",
1039                                  axgbe_xstats_strings[i].name);
1040                 }
1041         }
1042
1043         return AXGBE_XSTATS_COUNT;
1044 }
1045
1046 static int
1047 axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1048                            uint64_t *values, unsigned int n)
1049 {
1050         unsigned int i;
1051         uint64_t values_copy[AXGBE_XSTATS_COUNT];
1052
1053         if (!ids) {
1054                 struct axgbe_port *pdata = dev->data->dev_private;
1055
1056                 if (n < AXGBE_XSTATS_COUNT)
1057                         return AXGBE_XSTATS_COUNT;
1058
1059                 axgbe_read_mmc_stats(pdata);
1060
1061                 for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
1062                         values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
1063                                         axgbe_xstats_strings[i].offset);
1064                 }
1065
1066                 return i;
1067         }
1068
1069         axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);
1070
1071         for (i = 0; i < n; i++) {
1072                 if (ids[i] >= AXGBE_XSTATS_COUNT) {
1073                         PMD_DRV_LOG(ERR, "id value isn't valid\n");
1074                         return -1;
1075                 }
1076                 values[i] = values_copy[ids[i]];
1077         }
1078         return n;
1079 }
1080
1081 static int
1082 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1083                                  const uint64_t *ids,
1084                                  struct rte_eth_xstat_name *xstats_names,
1085                                  unsigned int size)
1086 {
1087         struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
1088         unsigned int i;
1089
1090         if (!ids)
1091                 return axgbe_dev_xstats_get_names(dev, xstats_names, size);
1092
1093         axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);
1094
1095         for (i = 0; i < size; i++) {
1096                 if (ids[i] >= AXGBE_XSTATS_COUNT) {
1097                         PMD_DRV_LOG(ERR, "id value isn't valid\n");
1098                         return -1;
1099                 }
1100                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1101         }
1102         return size;
1103 }
1104
1105 static int
1106 axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
1107 {
1108         struct axgbe_port *pdata = dev->data->dev_private;
1109         struct axgbe_mmc_stats *stats = &pdata->mmc_stats;
1110
1111         /* MMC registers are configured for reset on read */
1112         axgbe_read_mmc_stats(pdata);
1113
1114         /* Reset stats */
1115         memset(stats, 0, sizeof(*stats));
1116
1117         return 0;
1118 }
1119
1120 static int
1121 axgbe_dev_stats_get(struct rte_eth_dev *dev,
1122                     struct rte_eth_stats *stats)
1123 {
1124         struct axgbe_rx_queue *rxq;
1125         struct axgbe_tx_queue *txq;
1126         struct axgbe_port *pdata = dev->data->dev_private;
1127         struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
1128         unsigned int i;
1129
1130         axgbe_read_mmc_stats(pdata);
1131
1132         stats->imissed = mmc_stats->rxfifooverflow;
1133
1134         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1135                 rxq = dev->data->rx_queues[i];
1136                 if (rxq) {
1137                         stats->q_ipackets[i] = rxq->pkts;
1138                         stats->ipackets += rxq->pkts;
1139                         stats->q_ibytes[i] = rxq->bytes;
1140                         stats->ibytes += rxq->bytes;
1141                         stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
1142                         stats->q_errors[i] = rxq->errors
1143                                 + rxq->rx_mbuf_alloc_failed;
1144                         stats->ierrors += rxq->errors;
1145                 } else {
1146                         PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
1147                                         dev->data->port_id);
1148                 }
1149         }
1150
1151         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1152                 txq = dev->data->tx_queues[i];
1153                 if (txq) {
1154                         stats->q_opackets[i] = txq->pkts;
1155                         stats->opackets += txq->pkts;
1156                         stats->q_obytes[i] = txq->bytes;
1157                         stats->obytes += txq->bytes;
1158                         stats->oerrors += txq->errors;
1159                 } else {
1160                         PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
1161                                         dev->data->port_id);
1162                 }
1163         }
1164
1165         return 0;
1166 }
1167
1168 static int
1169 axgbe_dev_stats_reset(struct rte_eth_dev *dev)
1170 {
1171         struct axgbe_rx_queue *rxq;
1172         struct axgbe_tx_queue *txq;
1173         unsigned int i;
1174
1175         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1176                 rxq = dev->data->rx_queues[i];
1177                 if (rxq) {
1178                         rxq->pkts = 0;
1179                         rxq->bytes = 0;
1180                         rxq->errors = 0;
1181                         rxq->rx_mbuf_alloc_failed = 0;
1182                 } else {
1183                         PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
1184                                         dev->data->port_id);
1185                 }
1186         }
1187         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1188                 txq = dev->data->tx_queues[i];
1189                 if (txq) {
1190                         txq->pkts = 0;
1191                         txq->bytes = 0;
1192                         txq->errors = 0;
1193                 } else {
1194                         PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
1195                                         dev->data->port_id);
1196                 }
1197         }
1198
1199         return 0;
1200 }
1201
1202 static int
1203 axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1204 {
1205         struct axgbe_port *pdata = dev->data->dev_private;
1206
1207         dev_info->max_rx_queues = pdata->rx_ring_count;
1208         dev_info->max_tx_queues = pdata->tx_ring_count;
1209         dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
1210         dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
1211         dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
1212         dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
1213         dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
1214
1215         dev_info->rx_offload_capa =
1216                 RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
1217                 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
1218                 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
1219                 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1220                 RTE_ETH_RX_OFFLOAD_UDP_CKSUM  |
1221                 RTE_ETH_RX_OFFLOAD_TCP_CKSUM  |
1222                 RTE_ETH_RX_OFFLOAD_SCATTER        |
1223                 RTE_ETH_RX_OFFLOAD_KEEP_CRC;
1224
1225         dev_info->tx_offload_capa =
1226                 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
1227                 RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
1228                 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
1229                 RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
1230                 RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
1231
1232         if (pdata->hw_feat.rss) {
1233                 dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
1234                 dev_info->reta_size = pdata->hw_feat.hash_table_size;
1235                 dev_info->hash_key_size =  AXGBE_RSS_HASH_KEY_SIZE;
1236         }
1237
1238         dev_info->rx_desc_lim = rx_desc_lim;
1239         dev_info->tx_desc_lim = tx_desc_lim;
1240
1241         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1242                 .rx_free_thresh = AXGBE_RX_FREE_THRESH,
1243         };
1244
1245         dev_info->default_txconf = (struct rte_eth_txconf) {
1246                 .tx_free_thresh = AXGBE_TX_FREE_THRESH,
1247         };
1248
1249         return 0;
1250 }
1251
/* Report the current flow-control configuration: thresholds and pause
 * time are read from hardware, the mode from the cached pause state.
 */
static int
axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;

	/* Sample the Tx flow-control register for the pause-time field */
	reg = MAC_Q0TFCR;
	reg_val = AXGMAC_IOREAD(pdata, reg);
	/* RFA/RFD are the Rx queue fill thresholds that activate and
	 * deactivate flow control, respectively
	 */
	fc.low_water[0] =  AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
	fc.high_water[0] =  AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
	fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
	fc.autoneg = pdata->pause_autoneg;

	/* Map the cached Rx/Tx pause flags onto the ethdev fc mode */
	if (pdata->rx_pause && pdata->tx_pause)
		fc.mode = RTE_ETH_FC_FULL;
	else if (pdata->rx_pause)
		fc.mode = RTE_ETH_FC_RX_PAUSE;
	else if (pdata->tx_pause)
		fc.mode = RTE_ETH_FC_TX_PAUSE;
	else
		fc.mode = RTE_ETH_FC_NONE;

	/* Convert register thresholds (512-byte units, << 9) back to KB.
	 * NOTE(review): high_water is derived from low_water[0] and vice
	 * versa -- this looks swapped; confirm against axgbe_flow_ctrl_set
	 * before changing.
	 */
	fc_conf->high_water =  (1024 + (fc.low_water[0] << 9)) / 1024;
	fc_conf->low_water =  (1024 + (fc.high_water[0] << 9)) / 1024;
	fc_conf->pause_time = fc.pause_time[0];
	fc_conf->send_xon = fc.send_xon;
	fc_conf->mode = fc.mode;

	return 0;
}
1283
/* Apply a new flow-control configuration: program the Rx queue 0
 * thresholds and pause time, then reconfigure Rx/Tx pause handling if
 * the effective state changed.
 */
static int
axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct xgbe_fc_info fc = pdata->fc;
	unsigned int reg, reg_val = 0;
	reg = MAC_Q0TFCR;

	pdata->pause_autoneg = fc_conf->autoneg;
	pdata->phy.pause_autoneg = pdata->pause_autoneg;
	fc.send_xon = fc_conf->send_xon;
	/* Thresholds are supplied in KB; scale to bytes for the hardware */
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
	AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
			AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
	/* NOTE(review): reg_val starts at 0, so this write clears every
	 * other MAC_Q0TFCR field rather than read-modify-write -- confirm
	 * intended.
	 */
	AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
	AXGMAC_IOWRITE(pdata, reg, reg_val);
	fc.mode = fc_conf->mode;

	/* Translate the ethdev fc mode into separate Rx/Tx pause flags */
	if (fc.mode == RTE_ETH_FC_FULL) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
		pdata->tx_pause = 0;
		pdata->rx_pause = 1;
	} else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
		pdata->tx_pause = 1;
		pdata->rx_pause = 0;
	} else {
		pdata->tx_pause = 0;
		pdata->rx_pause = 0;
	}

	/* Reprogram hardware only when the effective state changed */
	if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
		pdata->hw_if.config_tx_flow_control(pdata);

	if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
		pdata->hw_if.config_rx_flow_control(pdata);

	pdata->hw_if.config_flow_control(pdata);
	pdata->phy.tx_pause = pdata->tx_pause;
	pdata->phy.rx_pause = pdata->rx_pause;

	return 0;
}
1329
1330 static int
1331 axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
1332                 struct rte_eth_pfc_conf *pfc_conf)
1333 {
1334         struct axgbe_port *pdata = dev->data->dev_private;
1335         struct xgbe_fc_info fc = pdata->fc;
1336         uint8_t tc_num;
1337
1338         tc_num = pdata->pfc_map[pfc_conf->priority];
1339
1340         if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
1341                 PMD_INIT_LOG(ERR, "Max supported  traffic class: %d\n",
1342                                 pdata->hw_feat.tc_cnt);
1343         return -EINVAL;
1344         }
1345
1346         pdata->pause_autoneg = pfc_conf->fc.autoneg;
1347         pdata->phy.pause_autoneg = pdata->pause_autoneg;
1348         fc.send_xon = pfc_conf->fc.send_xon;
1349         AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
1350                 AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
1351         AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
1352                 AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));
1353
1354         switch (tc_num) {
1355         case 0:
1356                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1357                                 PSTC0, pfc_conf->fc.pause_time);
1358                 break;
1359         case 1:
1360                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1361                                 PSTC1, pfc_conf->fc.pause_time);
1362                 break;
1363         case 2:
1364                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1365                                 PSTC2, pfc_conf->fc.pause_time);
1366                 break;
1367         case 3:
1368                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1369                                 PSTC3, pfc_conf->fc.pause_time);
1370                 break;
1371         case 4:
1372                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1373                                 PSTC4, pfc_conf->fc.pause_time);
1374                 break;
1375         case 5:
1376                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1377                                 PSTC5, pfc_conf->fc.pause_time);
1378                 break;
1379         case 7:
1380                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1381                                 PSTC6, pfc_conf->fc.pause_time);
1382                 break;
1383         case 6:
1384                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1385                                 PSTC7, pfc_conf->fc.pause_time);
1386                 break;
1387         }
1388
1389         fc.mode = pfc_conf->fc.mode;
1390
1391         if (fc.mode == RTE_ETH_FC_FULL) {
1392                 pdata->tx_pause = 1;
1393                 pdata->rx_pause = 1;
1394                 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
1395         } else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
1396                 pdata->tx_pause = 0;
1397                 pdata->rx_pause = 1;
1398                 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
1399         } else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
1400                 pdata->tx_pause = 1;
1401                 pdata->rx_pause = 0;
1402                 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
1403         } else {
1404                 pdata->tx_pause = 0;
1405                 pdata->rx_pause = 0;
1406                 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
1407         }
1408
1409         if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
1410                 pdata->hw_if.config_tx_flow_control(pdata);
1411
1412         if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
1413                 pdata->hw_if.config_rx_flow_control(pdata);
1414         pdata->hw_if.config_flow_control(pdata);
1415         pdata->phy.tx_pause = pdata->tx_pause;
1416         pdata->phy.rx_pause = pdata->rx_pause;
1417
1418         return 0;
1419 }
1420
1421 void
1422 axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1423         struct rte_eth_rxq_info *qinfo)
1424 {
1425         struct   axgbe_rx_queue *rxq;
1426
1427         rxq = dev->data->rx_queues[queue_id];
1428         qinfo->mp = rxq->mb_pool;
1429         qinfo->scattered_rx = dev->data->scattered_rx;
1430         qinfo->nb_desc = rxq->nb_desc;
1431         qinfo->conf.rx_free_thresh = rxq->free_thresh;
1432 }
1433
1434 void
1435 axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1436         struct rte_eth_txq_info *qinfo)
1437 {
1438         struct  axgbe_tx_queue *txq;
1439
1440         txq = dev->data->tx_queues[queue_id];
1441         qinfo->nb_desc = txq->nb_desc;
1442         qinfo->conf.tx_free_thresh = txq->free_thresh;
1443 }
1444 const uint32_t *
1445 axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1446 {
1447         static const uint32_t ptypes[] = {
1448                 RTE_PTYPE_L2_ETHER,
1449                 RTE_PTYPE_L2_ETHER_TIMESYNC,
1450                 RTE_PTYPE_L2_ETHER_LLDP,
1451                 RTE_PTYPE_L2_ETHER_ARP,
1452                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1453                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1454                 RTE_PTYPE_L4_FRAG,
1455                 RTE_PTYPE_L4_ICMP,
1456                 RTE_PTYPE_L4_NONFRAG,
1457                 RTE_PTYPE_L4_SCTP,
1458                 RTE_PTYPE_L4_TCP,
1459                 RTE_PTYPE_L4_UDP,
1460                 RTE_PTYPE_TUNNEL_GRENAT,
1461                 RTE_PTYPE_TUNNEL_IP,
1462                 RTE_PTYPE_INNER_L2_ETHER,
1463                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1464                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1465                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1466                 RTE_PTYPE_INNER_L4_FRAG,
1467                 RTE_PTYPE_INNER_L4_ICMP,
1468                 RTE_PTYPE_INNER_L4_NONFRAG,
1469                 RTE_PTYPE_INNER_L4_SCTP,
1470                 RTE_PTYPE_INNER_L4_TCP,
1471                 RTE_PTYPE_INNER_L4_UDP,
1472                 RTE_PTYPE_UNKNOWN
1473         };
1474
1475         if (dev->rx_pkt_burst == axgbe_recv_pkts)
1476                 return ptypes;
1477         return NULL;
1478 }
1479
1480 static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1481 {
1482         struct axgbe_port *pdata = dev->data->dev_private;
1483         unsigned int val;
1484
1485         /* mtu setting is forbidden if port is start */
1486         if (dev->data->dev_started) {
1487                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1488                                 dev->data->port_id);
1489                 return -EBUSY;
1490         }
1491         val = mtu > RTE_ETHER_MTU ? 1 : 0;
1492         AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
1493
1494         return 0;
1495 }
1496
1497 static void
1498 axgbe_update_tstamp_time(struct axgbe_port *pdata,
1499                 unsigned int sec, unsigned int nsec, int addsub)
1500 {
1501         unsigned int count = 100;
1502         uint32_t sub_val = 0;
1503         uint32_t sub_val_sec = 0xFFFFFFFF;
1504         uint32_t sub_val_nsec = 0x3B9ACA00;
1505
1506         if (addsub) {
1507                 if (sec)
1508                         sub_val = sub_val_sec - (sec - 1);
1509                 else
1510                         sub_val = sec;
1511
1512                 AXGMAC_IOWRITE(pdata, MAC_STSUR, sub_val);
1513                 sub_val = sub_val_nsec - nsec;
1514                 AXGMAC_IOWRITE(pdata, MAC_STNUR, sub_val);
1515                 AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 1);
1516         } else {
1517                 AXGMAC_IOWRITE(pdata, MAC_STSUR, sec);
1518                 AXGMAC_IOWRITE_BITS(pdata, MAC_STNUR, ADDSUB, 0);
1519                 AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
1520         }
1521         AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1);
1522         /* Wait for time update to complete */
1523         while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
1524                 rte_delay_ms(1);
1525 }
1526
/* Unsigned 64-bit division returning the quotient; the remainder is
 * stored through @remainder (Linux-style helper).
 */
static inline uint64_t
div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
{
	uint64_t quotient = dividend / divisor;

	*remainder = (uint32_t)(dividend % divisor);
	return quotient;
}
1533
/* Unsigned 64-bit division, discarding the remainder. */
static inline uint64_t
div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}
1540
1541 static int
1542 axgbe_adjfreq(struct axgbe_port *pdata, int64_t delta)
1543 {
1544         uint64_t adjust;
1545         uint32_t addend, diff;
1546         unsigned int neg_adjust = 0;
1547
1548         if (delta < 0) {
1549                 neg_adjust = 1;
1550                 delta = -delta;
1551         }
1552         adjust = (uint64_t)pdata->tstamp_addend;
1553         adjust *= delta;
1554         diff = (uint32_t)div_u64(adjust, 1000000000UL);
1555         addend = (neg_adjust) ? pdata->tstamp_addend - diff :
1556                                 pdata->tstamp_addend + diff;
1557         pdata->tstamp_addend = addend;
1558         axgbe_update_tstamp_addend(pdata, addend);
1559         return 0;
1560 }
1561
1562 static int
1563 axgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
1564 {
1565         struct axgbe_port *pdata = dev->data->dev_private;
1566         struct timespec timestamp_delta;
1567
1568         axgbe_adjfreq(pdata, delta);
1569         pdata->systime_tc.nsec += delta;
1570
1571         if (delta < 0) {
1572                 delta = -delta;
1573                 timestamp_delta = rte_ns_to_timespec(delta);
1574                 axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec,
1575                                 timestamp_delta.tv_nsec, 1);
1576         } else {
1577                 timestamp_delta = rte_ns_to_timespec(delta);
1578                 axgbe_update_tstamp_time(pdata, timestamp_delta.tv_sec,
1579                                 timestamp_delta.tv_nsec, 0);
1580         }
1581         return 0;
1582 }
1583
1584 static int
1585 axgbe_timesync_read_time(struct rte_eth_dev *dev,
1586                 struct timespec *timestamp)
1587 {
1588         uint64_t nsec;
1589         struct axgbe_port *pdata = dev->data->dev_private;
1590
1591         nsec = AXGMAC_IOREAD(pdata, MAC_STSR);
1592         nsec *= NSEC_PER_SEC;
1593         nsec += AXGMAC_IOREAD(pdata, MAC_STNR);
1594         *timestamp = rte_ns_to_timespec(nsec);
1595         return 0;
1596 }
1597 static int
1598 axgbe_timesync_write_time(struct rte_eth_dev *dev,
1599                                     const struct timespec *timestamp)
1600 {
1601         unsigned int count = 100;
1602         struct axgbe_port *pdata = dev->data->dev_private;
1603
1604         AXGMAC_IOWRITE(pdata, MAC_STSUR, timestamp->tv_sec);
1605         AXGMAC_IOWRITE(pdata, MAC_STNUR, timestamp->tv_nsec);
1606         AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSUPDT, 1);
1607         /* Wait for time update to complete */
1608         while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSUPDT))
1609                 rte_delay_ms(1);
1610         if (!count)
1611                 PMD_DRV_LOG(ERR, "Timed out update timestamp\n");
1612         return 0;
1613 }
1614
1615 static void
1616 axgbe_update_tstamp_addend(struct axgbe_port *pdata,
1617                 uint32_t addend)
1618 {
1619         unsigned int count = 100;
1620
1621         AXGMAC_IOWRITE(pdata, MAC_TSAR, addend);
1622         AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
1623
1624         /* Wait for addend update to complete */
1625         while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
1626                 rte_delay_ms(1);
1627         if (!count)
1628                 PMD_DRV_LOG(ERR, "Timed out updating timestamp addend register\n");
1629 }
1630
1631 static void
1632 axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
1633                 unsigned int nsec)
1634 {
1635         unsigned int count = 100;
1636
1637         /*System Time Sec Update*/
1638         AXGMAC_IOWRITE(pdata, MAC_STSUR, sec);
1639         /*System Time nanoSec Update*/
1640         AXGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
1641         /*Initialize Timestamp*/
1642         AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
1643
1644         /* Wait for time update to complete */
1645         while (--count && AXGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
1646                 rte_delay_ms(1);
1647         if (!count)
1648                 PMD_DRV_LOG(ERR, "Timed out initializing timestamp\n");
1649 }
1650
/*
 * Enable IEEE 1588 PTP timestamping: program MAC_TSCR for fine-update
 * mode with timestamping of all packets, set the sub-second increments,
 * compute the addend for the PTP clock rate, and seed the hardware
 * clock and software timecounter from CLOCK_REALTIME.
 */
static int
axgbe_timesync_enable(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int mac_tscr = 0;
	uint64_t dividend;
	struct timespec timestamp;
	uint64_t nsec;

	/* Set one nano-second accuracy */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);

	/* Set fine timestamp update */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);

	/* Overwrite earlier timestamps */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);

	AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Enabling processing of ptp over eth pkt */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
	/* Enable timestamp for all pkts*/
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);

	/* enabling timestamp */
	AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
	AXGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);

	/* Exit if timestamping is not enabled */
	if (!AXGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA)) {
		PMD_DRV_LOG(ERR, "Exiting as timestamp is not enabled\n");
		return 0;
	}

	/* Sub-second Increment Value*/
	AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, AXGBE_TSTAMP_SSINC);
	/* Sub-nanosecond Increment Value */
	AXGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, AXGBE_TSTAMP_SNSINC);

	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
	/* addend = (50 MHz << 32) / ptpclk_rate: fine-update accumulator step */
	dividend = 50000000;
	dividend <<= 32;
	pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);

	axgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
	axgbe_set_tstamp_time(pdata, 0, 0);

	/* Initialize the timecounter */
	memset(&pdata->systime_tc, 0, sizeof(struct rte_timecounter));

	pdata->systime_tc.cc_mask = AXGBE_CYCLECOUNTER_MASK;
	pdata->systime_tc.cc_shift = 0;
	pdata->systime_tc.nsec_mask = 0;

	PMD_DRV_LOG(DEBUG, "Initializing system time counter with realtime\n");

	/* Updating the counter once with clock real time */
	clock_gettime(CLOCK_REALTIME, &timestamp);
	nsec = rte_timespec_to_ns(&timestamp);
	/* NOTE(review): the value returned below is never read again; the
	 * hardware clock is seeded directly from 'timestamp'. Presumably the
	 * call is only wanted for the timecounter's internal state — confirm.
	 */
	nsec = rte_timecounter_update(&pdata->systime_tc, nsec);
	axgbe_set_tstamp_time(pdata, timestamp.tv_sec, timestamp.tv_nsec);
	return 0;
}
1716
1717 static int
1718 axgbe_timesync_disable(struct rte_eth_dev *dev)
1719 {
1720         struct axgbe_port *pdata = dev->data->dev_private;
1721         unsigned int mac_tscr = 0;
1722
1723         /*disable timestamp for all pkts*/
1724         AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 0);
1725         /*disable the addened register*/
1726         AXGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 0);
1727         /* disable timestamp update */
1728         AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 0);
1729         /*disable time stamp*/
1730         AXGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 0);
1731         return 0;
1732 }
1733
/*
 * Retrieve the Rx timestamp of the most recent PTP packet from the
 * current descriptor of Rx queue 0 and convert it to a timespec.
 * NOTE(review): only queue 0 is inspected, and the OWN-bit poll below
 * has no timeout — confirm callers guarantee forward progress.
 */
static int
axgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
				struct timespec *timestamp, uint32_t flags)
{
	uint64_t nsec = 0;
	volatile union axgbe_rx_desc *desc;
	uint16_t idx, pmt;
	struct axgbe_rx_queue *rxq = *dev->data->rx_queues;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	desc = &rxq->desc[idx];

	/* Busy-wait until the hardware hands the descriptor back to us */
	while (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
		rte_delay_ms(1);
	/* Timestamps are delivered via context descriptors only */
	if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CTXT)) {
		/* Timestamp available (TSA) and not dropped (TSD) */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_CONTEXT_DESC3, TSA) &&
				!AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_CONTEXT_DESC3, TSD)) {
			pmt = AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_CONTEXT_DESC3, PMT);
			/* desc1 holds seconds, desc0 holds nanoseconds */
			nsec = rte_le_to_cpu_32(desc->write.desc1);
			nsec *= NSEC_PER_SEC;
			nsec += rte_le_to_cpu_32(desc->write.desc0);
			/* All-ones marks an invalid timestamp */
			if (nsec != 0xffffffffffffffffULL) {
				/* pmt == 0x01: presumably only SYNC messages
				 * are reported to the caller — TODO confirm
				 * against the descriptor PMT encoding.
				 */
				if (pmt == 0x01)
					*timestamp = rte_ns_to_timespec(nsec);
				PMD_DRV_LOG(DEBUG,
					"flags = 0x%x nsec = %"PRIu64"\n",
					flags, nsec);
			}
		}
	}

	return 0;
}
1769
1770 static int
1771 axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
1772                                 struct timespec *timestamp)
1773 {
1774         uint64_t nsec;
1775         struct axgbe_port *pdata = dev->data->dev_private;
1776         unsigned int tx_snr, tx_ssr;
1777
1778         rte_delay_us(5);
1779         if (pdata->vdata->tx_tstamp_workaround) {
1780                 tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR);
1781                 tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR);
1782
1783         } else {
1784                 tx_ssr = AXGMAC_IOREAD(pdata, MAC_TXSSR);
1785                 tx_snr = AXGMAC_IOREAD(pdata, MAC_TXSNR);
1786         }
1787         if (AXGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS)) {
1788                 PMD_DRV_LOG(DEBUG, "Waiting for TXTSSTSMIS\n");
1789                 return 0;
1790         }
1791         nsec = tx_ssr;
1792         nsec *= NSEC_PER_SEC;
1793         nsec += tx_snr;
1794         PMD_DRV_LOG(DEBUG, "nsec = %"PRIu64" tx_ssr = %d tx_snr = %d\n",
1795                         nsec, tx_ssr, tx_snr);
1796         *timestamp = rte_ns_to_timespec(nsec);
1797         return 0;
1798 }
1799
1800 static int
1801 axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
1802 {
1803         struct axgbe_port *pdata = dev->data->dev_private;
1804         unsigned long vid_bit, vid_idx;
1805
1806         vid_bit = VLAN_TABLE_BIT(vid);
1807         vid_idx = VLAN_TABLE_IDX(vid);
1808
1809         if (on) {
1810                 PMD_DRV_LOG(DEBUG, "Set VLAN vid=%d for device = %s\n",
1811                             vid, pdata->eth_dev->device->name);
1812                 pdata->active_vlans[vid_idx] |= vid_bit;
1813         } else {
1814                 PMD_DRV_LOG(DEBUG, "Reset VLAN vid=%d for device = %s\n",
1815                             vid, pdata->eth_dev->device->name);
1816                 pdata->active_vlans[vid_idx] &= ~vid_bit;
1817         }
1818         pdata->hw_if.update_vlan_hash_table(pdata);
1819         return 0;
1820 }
1821
/*
 * Configure which VLAN tag (inner/outer) the MAC matches for the given
 * TPID. Inner-tag matching (ERIVLT) is only valid when double-VLAN
 * (QinQ/EDVLP) mode is enabled; only TPIDs 0x8100 and 0x88A8 are
 * supported by the hardware.
 */
static int
axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
		    enum rte_vlan_type vlan_type,
		    uint16_t tpid)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	uint32_t reg = 0;
	uint32_t qinq = 0;

	/* EDVLP: is extended (double) VLAN processing currently enabled? */
	qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
	PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);

	switch (vlan_type) {
	case RTE_ETH_VLAN_TYPE_INNER:
		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
		if (qinq) {
			/* NOTE(review): unsupported TPIDs are only logged,
			 * not rejected — the register is programmed anyway.
			 */
			if (tpid != 0x8100 && tpid != 0x88a8)
				PMD_DRV_LOG(ERR,
					    "tag supported 0x8100/0x88A8\n");
			PMD_DRV_LOG(DEBUG, "qinq with inner tag\n");

			/*Enable Inner VLAN Tag */
			AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 1);
			reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
			PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);

		} else {
			PMD_DRV_LOG(ERR,
				    "Inner type not supported in single tag\n");
		}
		break;
	case RTE_ETH_VLAN_TYPE_OUTER:
		PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
		if (qinq) {
			PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
			/*Enable outer VLAN tag*/
			AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 0);
			reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
			PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);

			/* CSVL: insert S-VLAN (outer) tag on Tx */
			AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 1);
			reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANIR, CSVL);
			PMD_DRV_LOG(DEBUG, "bit CSVL = 0x%x\n", reg);
		} else {
			if (tpid != 0x8100 && tpid != 0x88a8)
				PMD_DRV_LOG(ERR,
					    "tag supported 0x8100/0x88A8\n");
		}
		break;
	case RTE_ETH_VLAN_TYPE_MAX:
		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
		break;
	case RTE_ETH_VLAN_TYPE_UNKNOWN:
		PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
		break;
	}
	return 0;
}
1880
1881 static void axgbe_vlan_extend_enable(struct axgbe_port *pdata)
1882 {
1883         int qinq = 0;
1884
1885         AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 1);
1886         qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
1887         PMD_DRV_LOG(DEBUG, "vlan double tag enabled EDVLP:qinq=0x%x\n", qinq);
1888 }
1889
1890 static void axgbe_vlan_extend_disable(struct axgbe_port *pdata)
1891 {
1892         int qinq = 0;
1893
1894         AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 0);
1895         qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
1896         PMD_DRV_LOG(DEBUG, "vlan double tag disable EDVLP:qinq=0x%x\n", qinq);
1897 }
1898
/*
 * Apply the VLAN offload configuration selected in dev_conf.rxmode for
 * each offload named in @mask: Rx stripping, Rx filtering and extended
 * (double-tag) mode. Offloads not named in @mask are left untouched.
 */
static int
axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct axgbe_port *pdata = dev->data->dev_private;

	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
			PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.enable_rx_vlan_stripping(pdata);
		} else {
			PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.disable_rx_vlan_stripping(pdata);
		}
	}
	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
			PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.enable_rx_vlan_filtering(pdata);
		} else {
			PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
				    pdata->eth_dev->device->name);
			pdata->hw_if.disable_rx_vlan_filtering(pdata);
		}
	}
	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
			PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
			axgbe_vlan_extend_enable(pdata);
			/* Set global registers with default ethertype*/
			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
					    RTE_ETHER_TYPE_VLAN);
			axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
					    RTE_ETHER_TYPE_VLAN);
		} else {
			PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
			axgbe_vlan_extend_disable(pdata);
		}
	}
	return 0;
}
1947
/*
 * Populate pdata->hw_feat by decoding the MAC hardware-feature
 * registers (MAC_HWF0R..MAC_HWF3R), then translate the encoded hash
 * table size, DMA width, zero-based counts and fifo-size exponents
 * into actual numbers.
 */
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2, mac_hfr3;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
	mac_hfr3 = AXGMAC_IOREAD(pdata, MAC_HWF3R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = AXGMAC_GET_BITS(mac_hfr1,
						 MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Hardware feature register 3 */
	hw_feat->tx_q_vlan_tag_ins  = AXGMAC_GET_BITS(mac_hfr3,
						      MAC_HWF3R, CBTISEL);
	hw_feat->no_of_vlan_extn    = AXGMAC_GET_BITS(mac_hfr3,
						      MAC_HWF3R, NRVF);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}
2056
/* Bind the dev/phy/i2c function-pointer tables. The version-specific
 * PHY hook runs last so it can override the generic phy_if entries.
 */
static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}
2064
/*
 * Derive the Tx/Rx ring and queue counts from the hardware feature
 * registers, clamped by any platform-provided maximums. Also binds the
 * function-pointer tables as a side effect (via axgbe_init_all_fptrs).
 */
static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				     pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				     pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				     pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				  pdata->rx_max_q_count);
}
2104
2105 static void axgbe_default_config(struct axgbe_port *pdata)
2106 {
2107         pdata->pblx8 = DMA_PBL_X8_ENABLE;
2108         pdata->tx_sf_mode = MTL_TSF_ENABLE;
2109         pdata->tx_threshold = MTL_TX_THRESHOLD_64;
2110         pdata->tx_pbl = DMA_PBL_32;
2111         pdata->tx_osp_mode = DMA_OSP_ENABLE;
2112         pdata->rx_sf_mode = MTL_RSF_ENABLE;
2113         pdata->rx_threshold = MTL_RX_THRESHOLD_64;
2114         pdata->rx_pbl = DMA_PBL_32;
2115         pdata->pause_autoneg = 1;
2116         pdata->tx_pause = 0;
2117         pdata->rx_pause = 0;
2118         pdata->phy_speed = SPEED_UNKNOWN;
2119         pdata->power_down = 0;
2120 }
2121
2122 /*
2123  * Return PCI root complex device id on success else 0
2124  */
2125 static uint16_t
2126 get_pci_rc_devid(void)
2127 {
2128         char pci_sysfs[PATH_MAX];
2129         const struct rte_pci_addr pci_rc_addr = {0, 0, 0, 0};
2130         unsigned long device_id;
2131
2132         snprintf(pci_sysfs, sizeof(pci_sysfs), "%s/" PCI_PRI_FMT "/device",
2133                  rte_pci_get_sysfs_path(), pci_rc_addr.domain,
2134                  pci_rc_addr.bus, pci_rc_addr.devid, pci_rc_addr.function);
2135
2136         /* get device id */
2137         if (eal_parse_sysfs_value(pci_sysfs, &device_id) < 0) {
2138                 PMD_INIT_LOG(ERR, "Error in reading PCI sysfs\n");
2139                 return 0;
2140         }
2141
2142         return (uint16_t)device_id;
2143 }
2144
2145 /*
2146  * It returns 0 on success.
2147  */
2148 static int
2149 eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
2150 {
2151         PMD_INIT_FUNC_TRACE();
2152         struct axgbe_port *pdata;
2153         struct rte_pci_device *pci_dev;
2154         uint32_t reg, mac_lo, mac_hi;
2155         uint32_t len;
2156         int ret;
2157
2158         eth_dev->dev_ops = &axgbe_eth_dev_ops;
2159
2160         eth_dev->rx_descriptor_status = axgbe_dev_rx_descriptor_status;
2161         eth_dev->tx_descriptor_status = axgbe_dev_tx_descriptor_status;
2162
2163         /*
2164          * For secondary processes, we don't initialise any further as primary
2165          * has already done this work.
2166          */
2167         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2168                 return 0;
2169
2170         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2171
2172         pdata = eth_dev->data->dev_private;
2173         /* initial state */
2174         rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
2175         rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
2176         pdata->eth_dev = eth_dev;
2177
2178         pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
2179         pdata->pci_dev = pci_dev;
2180
2181         /*
2182          * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
2183          */
2184         if ((get_pci_rc_devid()) == AMD_PCI_RV_ROOT_COMPLEX_ID) {
2185                 pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
2186                 pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
2187         } else {
2188                 pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
2189                 pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
2190         }
2191
2192         pdata->xgmac_regs =
2193                 (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
2194         pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
2195                                      + AXGBE_MAC_PROP_OFFSET);
2196         pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
2197                                     + AXGBE_I2C_CTRL_OFFSET);
2198         pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
2199
2200         /* version specific driver data*/
2201         if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
2202                 pdata->vdata = &axgbe_v2a;
2203         else
2204                 pdata->vdata = &axgbe_v2b;
2205
2206         /* Configure the PCS indirect addressing support */
2207         reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
2208         pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
2209         pdata->xpcs_window <<= 6;
2210         pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
2211         pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
2212         pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
2213
2214         PMD_INIT_LOG(DEBUG,
2215                      "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
2216                      pdata->xpcs_window_size, pdata->xpcs_window_mask);
2217         XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
2218
2219         /* Retrieve the MAC address */
2220         mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
2221         mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
2222         pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
2223         pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
2224         pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
2225         pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
2226         pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
2227         pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8)  &  0xff;
2228
2229         len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
2230         eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);
2231
2232         if (!eth_dev->data->mac_addrs) {
2233                 PMD_INIT_LOG(ERR,
2234                              "Failed to alloc %u bytes needed to "
2235                              "store MAC addresses", len);
2236                 return -ENOMEM;
2237         }
2238
2239         /* Allocate memory for storing hash filter MAC addresses */
2240         len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
2241         eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
2242                                                     len, 0);
2243
2244         if (eth_dev->data->hash_mac_addrs == NULL) {
2245                 PMD_INIT_LOG(ERR,
2246                              "Failed to allocate %d bytes needed to "
2247                              "store MAC addresses", len);
2248                 return -ENOMEM;
2249         }
2250
2251         if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
2252                 rte_eth_random_addr(pdata->mac_addr.addr_bytes);
2253
2254         /* Copy the permanent MAC address */
2255         rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
2256
2257         /* Clock settings */
2258         pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
2259         pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
2260
2261         /* Set the DMA coherency values */
2262         pdata->coherent = 1;
2263         pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
2264         pdata->arcache = AXGBE_DMA_OS_ARCACHE;
2265         pdata->awcache = AXGBE_DMA_OS_AWCACHE;
2266
2267         /* Set the maximum channels and queues */
2268         reg = XP_IOREAD(pdata, XP_PROP_1);
2269         pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
2270         pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
2271         pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
2272         pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
2273
2274         /* Set the hardware channel and queue counts */
2275         axgbe_set_counts(pdata);
2276
2277         /* Set the maximum fifo amounts */
2278         reg = XP_IOREAD(pdata, XP_PROP_2);
2279         pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
2280         pdata->tx_max_fifo_size *= 16384;
2281         pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
2282                                           pdata->vdata->tx_max_fifo_size);
2283         pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
2284         pdata->rx_max_fifo_size *= 16384;
2285         pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
2286                                           pdata->vdata->rx_max_fifo_size);
2287         /* Issue software reset to DMA */
2288         ret = pdata->hw_if.exit(pdata);
2289         if (ret)
2290                 PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");
2291
2292         /* Set default configuration data */
2293         axgbe_default_config(pdata);
2294
2295         /* Set default max values if not provided */
2296         if (!pdata->tx_max_fifo_size)
2297                 pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
2298         if (!pdata->rx_max_fifo_size)
2299                 pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
2300
2301         pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
2302         pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
2303         pthread_mutex_init(&pdata->xpcs_mutex, NULL);
2304         pthread_mutex_init(&pdata->i2c_mutex, NULL);
2305         pthread_mutex_init(&pdata->an_mutex, NULL);
2306         pthread_mutex_init(&pdata->phy_mutex, NULL);
2307
2308         ret = pdata->phy_if.phy_init(pdata);
2309         if (ret) {
2310                 rte_free(eth_dev->data->mac_addrs);
2311                 eth_dev->data->mac_addrs = NULL;
2312                 return ret;
2313         }
2314
2315         rte_intr_callback_register(pci_dev->intr_handle,
2316                                    axgbe_dev_interrupt_handler,
2317                                    (void *)eth_dev);
2318         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
2319                      eth_dev->data->port_id, pci_dev->id.vendor_id,
2320                      pci_dev->id.device_id);
2321
2322         return 0;
2323 }
2324
2325 static int
2326 axgbe_dev_close(struct rte_eth_dev *eth_dev)
2327 {
2328         struct rte_pci_device *pci_dev;
2329
2330         PMD_INIT_FUNC_TRACE();
2331
2332         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2333                 return 0;
2334
2335         pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
2336         axgbe_dev_clear_queues(eth_dev);
2337
2338         /* disable uio intr before callback unregister */
2339         rte_intr_disable(pci_dev->intr_handle);
2340         rte_intr_callback_unregister(pci_dev->intr_handle,
2341                                      axgbe_dev_interrupt_handler,
2342                                      (void *)eth_dev);
2343
2344         return 0;
2345 }
2346
2347 static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2348         struct rte_pci_device *pci_dev)
2349 {
2350         return rte_eth_dev_pci_generic_probe(pci_dev,
2351                 sizeof(struct axgbe_port), eth_axgbe_dev_init);
2352 }
2353
2354 static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
2355 {
2356         return rte_eth_dev_pci_generic_remove(pci_dev, axgbe_dev_close);
2357 }
2358
/* PCI driver descriptor handed to the EAL PCI bus at registration. */
static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,		/* supported vendor/device IDs */
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,	/* BARs must be mapped before probe */
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};
2365
/* Register the PMD with the PCI bus and export its device-ID table. */
RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
/* Kernel modules that can bind the device for userspace access. */
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
/* Per-component log types (default level NOTICE). */
RTE_LOG_REGISTER_SUFFIX(axgbe_logtype_init, init, NOTICE);
RTE_LOG_REGISTER_SUFFIX(axgbe_logtype_driver, driver, NOTICE);