log: introduce logtype register macro
[dpdk.git] / drivers / net / axgbe / axgbe_ethdev.c
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
3  *   Copyright(c) 2018 Synopsys, Inc. All rights reserved.
4  */
5
6 #include "axgbe_rxtx.h"
7 #include "axgbe_ethdev.h"
8 #include "axgbe_common.h"
9 #include "axgbe_phy.h"
10 #include "axgbe_regs.h"
11
/*
 * Forward declarations of the eth_dev_ops callbacks and init/uninit
 * hooks implemented later in this file.
 */
static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
/* Device lifecycle. */
static int  axgbe_dev_configure(struct rte_eth_dev *dev);
static int  axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
/* Rx filtering modes. */
static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
/* MAC address management. */
static int axgbe_dev_mac_addr_set(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr);
static int axgbe_dev_mac_addr_add(struct rte_eth_dev *dev,
				  struct rte_ether_addr *mac_addr,
				  uint32_t index,
				  uint32_t vmdq);
static void axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static int axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
				      struct rte_ether_addr *mc_addr_set,
				      uint32_t nb_mc_addr);
static int axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
				       struct rte_ether_addr *mac_addr,
				       uint8_t add);
static int axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev,
					   uint8_t add);
/* Link, registers and statistics. */
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_get_regs(struct rte_eth_dev *dev,
			      struct rte_dev_reg_info *regs);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static int axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static int axgbe_dev_xstats_get(struct rte_eth_dev *dev,
				struct rte_eth_xstat *stats,
				unsigned int n);
static int
axgbe_dev_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
static int
axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
			   const uint64_t *ids,
			   uint64_t *values,
			   unsigned int n);
static int
axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
				 struct rte_eth_xstat_name *xstats_names,
				 const uint64_t *ids,
				 unsigned int size);
static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
/* RSS configuration. */
static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_reta_entry64 *reta_conf,
			  uint16_t reta_size);
static int axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
			 struct rte_eth_rss_reta_entry64 *reta_conf,
			 uint16_t reta_size);
static int axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
				     struct rte_eth_rss_conf *rss_conf);
static int axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf);
/* Device/queue info and flow control. */
static int  axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);
static int axgbe_flow_ctrl_get(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int axgbe_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_fc_conf *fc_conf);
static int axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
				struct rte_eth_pfc_conf *pfc_conf);
static void axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo);
static void axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo);
const uint32_t *axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
87
/*
 * Descriptor for one extended statistic: the name reported to the
 * application and the byte offset of the corresponding counter inside
 * struct axgbe_mmc_stats.
 */
struct axgbe_xstats {
	char name[RTE_ETH_XSTATS_NAME_SIZE];	/* user-visible stat name */
	int offset;				/* offset into struct axgbe_mmc_stats */
};
92
/*
 * Build one axgbe_xstats table entry: pair a stat name with the offset
 * of the MMC counter field _var within struct axgbe_mmc_stats.
 */
#define AXGMAC_MMC_STAT(_string, _var)                           \
	{ _string,                                              \
	  offsetof(struct axgbe_mmc_stats, _var),       \
	}
97
/*
 * Table of all extended statistics exported by this PMD, mapping each
 * xstat name to the hardware MMC counter it is read from.  The "_gb"
 * counters count good and bad frames; "_g" counters count good frames
 * only (suffix convention of the MMC register names).
 */
static const struct axgbe_xstats axgbe_xstats_strings[] = {
	/* Tx counters */
	AXGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
	AXGMAC_MMC_STAT("tx_packets", txframecount_gb),
	AXGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
	AXGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
	AXGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
	AXGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
	AXGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
	AXGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
	AXGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
	AXGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
	AXGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
	AXGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
	AXGMAC_MMC_STAT("tx_pause_frames", txpauseframes),

	/* Rx counters */
	AXGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
	AXGMAC_MMC_STAT("rx_packets", rxframecount_gb),
	AXGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
	AXGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
	AXGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
	AXGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
	AXGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
	AXGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
	AXGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
	AXGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
	AXGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
	AXGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
	AXGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
	AXGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
	AXGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
	AXGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
	AXGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
	AXGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
	AXGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
	AXGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
	AXGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
	AXGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
};
137
138 #define AXGBE_XSTATS_COUNT        ARRAY_SIZE(axgbe_xstats_strings)
139
140 /* The set of PCI devices this driver supports */
141 #define AMD_PCI_VENDOR_ID       0x1022
142 #define AMD_PCI_RV_ROOT_COMPLEX_ID      0x15d0
143 #define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
144 #define AMD_PCI_AXGBE_DEVICE_V2B 0x1459
145
/* PCI IDs matched by this driver: AMD xGbE V2A and V2B variants. */
static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },	/* sentinel: end of table */
};
151
/*
 * Hardware capability description for the V2A device (0x1458).
 * NOTE(review): fifo sizes appear to be in bytes -- confirm against the
 * hardware datasheet.
 */
static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,	/* MMC counters are 64-bit wide */
	.tx_max_fifo_size		= 229376,
	.rx_max_fifo_size		= 229376,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};
163
/*
 * Hardware capability description for the V2B device (0x1459); same
 * feature set as V2A but with smaller Tx/Rx fifos.
 */
static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl	= axgbe_init_function_ptrs_phy_v2,
	.xpcs_access			= AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit			= 1,	/* MMC counters are 64-bit wide */
	.tx_max_fifo_size		= 65536,
	.rx_max_fifo_size		= 65536,
	.tx_tstamp_workaround		= 1,
	.ecc_support			= 1,
	.i2c_support			= 1,
	.an_cdr_workaround		= 1,
};
175
/* Rx descriptor ring limits advertised through dev_info_get. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,	/* ring size must be a multiple of 8 */
};
181
/* Tx descriptor ring limits advertised through dev_info_get. */
static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,	/* ring size must be a multiple of 8 */
};
187
/*
 * ethdev callback table wiring the generic rte_ethdev API to the
 * axgbe implementations in this file (and in axgbe_rxtx.c for the
 * queue setup/release and descriptor-status hooks).
 */
static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.promiscuous_enable   = axgbe_dev_promiscuous_enable,
	.promiscuous_disable  = axgbe_dev_promiscuous_disable,
	.allmulticast_enable  = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.mac_addr_set         = axgbe_dev_mac_addr_set,
	.mac_addr_add         = axgbe_dev_mac_addr_add,
	.mac_addr_remove      = axgbe_dev_mac_addr_remove,
	.set_mc_addr_list     = axgbe_dev_set_mc_addr_list,
	.uc_hash_table_set    = axgbe_dev_uc_hash_table_set,
	.uc_all_hash_table_set = axgbe_dev_uc_all_hash_table_set,
	.link_update          = axgbe_dev_link_update,
	.get_reg              = axgbe_dev_get_regs,
	.stats_get            = axgbe_dev_stats_get,
	.stats_reset          = axgbe_dev_stats_reset,
	.xstats_get           = axgbe_dev_xstats_get,
	.xstats_reset         = axgbe_dev_xstats_reset,
	.xstats_get_names     = axgbe_dev_xstats_get_names,
	.xstats_get_names_by_id = axgbe_dev_xstats_get_names_by_id,
	.xstats_get_by_id     = axgbe_dev_xstats_get_by_id,
	.reta_update          = axgbe_dev_rss_reta_update,
	.reta_query           = axgbe_dev_rss_reta_query,
	.rss_hash_update      = axgbe_dev_rss_hash_update,
	.rss_hash_conf_get    = axgbe_dev_rss_hash_conf_get,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
	.flow_ctrl_get        = axgbe_flow_ctrl_get,
	.flow_ctrl_set        = axgbe_flow_ctrl_set,
	.priority_flow_ctrl_set = axgbe_priority_flow_ctrl_set,
	.rxq_info_get                 = axgbe_rxq_info_get,
	.txq_info_get                 = axgbe_txq_info_get,
	.dev_supported_ptypes_get     = axgbe_dev_supported_ptypes_get,
	.rx_descriptor_status         = axgbe_dev_rx_descriptor_status,
	.tx_descriptor_status         = axgbe_dev_tx_descriptor_status,
	.mtu_set                = axgb_mtu_set,
};
231
232 static int axgbe_phy_reset(struct axgbe_port *pdata)
233 {
234         pdata->phy_link = -1;
235         pdata->phy_speed = SPEED_UNKNOWN;
236         return pdata->phy_if.phy_reset(pdata);
237 }
238
239 /*
240  * Interrupt handler triggered by NIC  for handling
241  * specific interrupt.
242  *
243  * @param handle
244  *  Pointer to interrupt handle.
245  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
247  *
248  * @return
249  *  void
250  */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	/* Let the PHY layer service its auto-negotiation interrupt first. */
	pdata->phy_if.an_isr(pdata);
	/*DMA related interrupts*/
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	PMD_DRV_LOG(DEBUG, "DMA_ISR=%#010x\n", dma_isr);
	if (dma_isr) {
		/* Bit 0 of DMA_ISR flags a pending interrupt on DMA channel 0. */
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			PMD_DRV_LOG(DEBUG, "DMA_CH0_ISR=%#010x\n", dma_ch_isr);
			/* Write the status back to clear the channel interrupt. */
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Unmask interrupts since disabled after generation */
	rte_intr_ack(&pdata->pci_dev->intr_handle);
}
277
278 /*
279  * Configure device link speed and setup link.
280  * It returns 0 on success.
281  */
282 static int
283 axgbe_dev_configure(struct rte_eth_dev *dev)
284 {
285         struct axgbe_port *pdata =  dev->data->dev_private;
286         /* Checksum offload to hardware */
287         pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
288                                 DEV_RX_OFFLOAD_CHECKSUM;
289         return 0;
290 }
291
292 static int
293 axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
294 {
295         struct axgbe_port *pdata = dev->data->dev_private;
296
297         if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
298                 pdata->rss_enable = 1;
299         else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
300                 pdata->rss_enable = 0;
301         else
302                 return  -1;
303         return 0;
304 }
305
/*
 * Start the device: configure Rx multi-queue mode, reset the PHY,
 * initialize the hardware, enable interrupts, start the PHY and the
 * Tx/Rx engines, then select the Rx burst handler (scattered or not).
 * The ordering of these steps follows the hardware bring-up sequence.
 * Returns 0 on success or a negative value from a failed step.
 */
static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	int ret;
	struct rte_eth_dev_data *dev_data = dev->data;
	uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;

	dev->dev_ops = &axgbe_eth_dev_ops;

	PMD_INIT_FUNC_TRACE();

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start*/
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	/* Mark the port as up and running. */
	rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
	rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
	/* Scattered Rx is needed if requested explicitly or if the max
	 * packet length exceeds a single Rx buffer.
	 */
	if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
				max_pkt_len > pdata->rx_buf_size)
		dev_data->scattered_rx = 1;

	/*  Scatter Rx handling */
	if (dev_data->scattered_rx)
		dev->rx_pkt_burst = &eth_axgbe_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = &axgbe_recv_pkts;

	return 0;
}
357
358 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	/* Nothing to do if the port was already stopped. */
	if (rte_bit_relaxed_get32(AXGBE_STOPPED, &pdata->dev_state))
		return;

	/* Mark stopped first, then tear down Tx/Rx, PHY and hardware --
	 * the reverse of the start-up order.
	 */
	rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	/* Report link down to the application. */
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
}
380
381 /* Clear all resources like TX/RX queues. */
/*
 * Close the device: release the Rx/Tx queue resources.  Assumes the
 * port has already been stopped.
 */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}
387
388 static int
389 axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
390 {
391         struct axgbe_port *pdata = dev->data->dev_private;
392
393         PMD_INIT_FUNC_TRACE();
394
395         AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
396
397         return 0;
398 }
399
400 static int
401 axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
402 {
403         struct axgbe_port *pdata = dev->data->dev_private;
404
405         PMD_INIT_FUNC_TRACE();
406
407         AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
408
409         return 0;
410 }
411
412 static int
413 axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
414 {
415         struct axgbe_port *pdata = dev->data->dev_private;
416
417         PMD_INIT_FUNC_TRACE();
418
419         if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
420                 return 0;
421         AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
422
423         return 0;
424 }
425
426 static int
427 axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
428 {
429         struct axgbe_port *pdata = dev->data->dev_private;
430
431         PMD_INIT_FUNC_TRACE();
432
433         if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
434                 return 0;
435         AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
436
437         return 0;
438 }
439
440 static int
441 axgbe_dev_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
442 {
443         struct axgbe_port *pdata = dev->data->dev_private;
444
445         /* Set Default MAC Addr */
446         axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, 0);
447
448         return 0;
449 }
450
451 static int
452 axgbe_dev_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
453                               uint32_t index, uint32_t pool __rte_unused)
454 {
455         struct axgbe_port *pdata = dev->data->dev_private;
456         struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
457
458         if (index > hw_feat->addn_mac) {
459                 PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
460                 return -EINVAL;
461         }
462         axgbe_set_mac_addn_addr(pdata, (u8 *)mac_addr, index);
463         return 0;
464 }
465
466 static int
467 axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
468                           struct rte_eth_rss_reta_entry64 *reta_conf,
469                           uint16_t reta_size)
470 {
471         struct axgbe_port *pdata = dev->data->dev_private;
472         unsigned int i, idx, shift;
473         int ret;
474
475         if (!pdata->rss_enable) {
476                 PMD_DRV_LOG(ERR, "RSS not enabled\n");
477                 return -ENOTSUP;
478         }
479
480         if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
481                 PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
482                 return -EINVAL;
483         }
484
485         for (i = 0; i < reta_size; i++) {
486                 idx = i / RTE_RETA_GROUP_SIZE;
487                 shift = i % RTE_RETA_GROUP_SIZE;
488                 if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
489                         continue;
490                 pdata->rss_table[i] = reta_conf[idx].reta[shift];
491         }
492
493         /* Program the lookup table */
494         ret = axgbe_write_rss_lookup_table(pdata);
495         return ret;
496 }
497
498 static int
499 axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
500                          struct rte_eth_rss_reta_entry64 *reta_conf,
501                          uint16_t reta_size)
502 {
503         struct axgbe_port *pdata = dev->data->dev_private;
504         unsigned int i, idx, shift;
505
506         if (!pdata->rss_enable) {
507                 PMD_DRV_LOG(ERR, "RSS not enabled\n");
508                 return -ENOTSUP;
509         }
510
511         if (reta_size == 0 || reta_size > AXGBE_RSS_MAX_TABLE_SIZE) {
512                 PMD_DRV_LOG(ERR, "reta_size %d is not supported\n", reta_size);
513                 return -EINVAL;
514         }
515
516         for (i = 0; i < reta_size; i++) {
517                 idx = i / RTE_RETA_GROUP_SIZE;
518                 shift = i % RTE_RETA_GROUP_SIZE;
519                 if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
520                         continue;
521                 reta_conf[idx].reta[shift] = pdata->rss_table[i];
522         }
523         return 0;
524 }
525
526 static int
527 axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
528                           struct rte_eth_rss_conf *rss_conf)
529 {
530         struct axgbe_port *pdata = dev->data->dev_private;
531         int ret;
532
533         if (!pdata->rss_enable) {
534                 PMD_DRV_LOG(ERR, "RSS not enabled\n");
535                 return -ENOTSUP;
536         }
537
538         if (rss_conf == NULL) {
539                 PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
540                 return -EINVAL;
541         }
542
543         if (rss_conf->rss_key != NULL &&
544             rss_conf->rss_key_len == AXGBE_RSS_HASH_KEY_SIZE) {
545                 rte_memcpy(pdata->rss_key, rss_conf->rss_key,
546                        AXGBE_RSS_HASH_KEY_SIZE);
547                 /* Program the hash key */
548                 ret = axgbe_write_rss_hash_key(pdata);
549                 if (ret != 0)
550                         return ret;
551         }
552
553         pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
554
555         if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
556                 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
557         if (pdata->rss_hf &
558             (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
559                 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
560         if (pdata->rss_hf &
561             (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
562                 AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
563
564         /* Set the RSS options */
565         AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
566
567         return 0;
568 }
569
570 static int
571 axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
572                             struct rte_eth_rss_conf *rss_conf)
573 {
574         struct axgbe_port *pdata = dev->data->dev_private;
575
576         if (!pdata->rss_enable) {
577                 PMD_DRV_LOG(ERR, "RSS not enabled\n");
578                 return -ENOTSUP;
579         }
580
581         if (rss_conf == NULL) {
582                 PMD_DRV_LOG(ERR, "rss_conf value isn't valid\n");
583                 return -EINVAL;
584         }
585
586         if (rss_conf->rss_key != NULL &&
587             rss_conf->rss_key_len >= AXGBE_RSS_HASH_KEY_SIZE) {
588                 rte_memcpy(rss_conf->rss_key, pdata->rss_key,
589                        AXGBE_RSS_HASH_KEY_SIZE);
590         }
591         rss_conf->rss_key_len = AXGBE_RSS_HASH_KEY_SIZE;
592         rss_conf->rss_hf = pdata->rss_hf;
593         return 0;
594 }
595
596 static void
597 axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
598 {
599         struct axgbe_port *pdata = dev->data->dev_private;
600         struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
601
602         if (index > hw_feat->addn_mac) {
603                 PMD_DRV_LOG(ERR, "Invalid Index %d\n", index);
604                 return;
605         }
606         axgbe_set_mac_addn_addr(pdata, NULL, index);
607 }
608
609 static int
610 axgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
611                                       struct rte_ether_addr *mc_addr_set,
612                                       uint32_t nb_mc_addr)
613 {
614         struct axgbe_port *pdata = dev->data->dev_private;
615         struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
616         uint32_t index = 1; /* 0 is always default mac */
617         uint32_t i;
618
619         if (nb_mc_addr > hw_feat->addn_mac) {
620                 PMD_DRV_LOG(ERR, "Invalid Index %d\n", nb_mc_addr);
621                 return -EINVAL;
622         }
623
624         /* clear unicast addresses */
625         for (i = 1; i < hw_feat->addn_mac; i++) {
626                 if (rte_is_zero_ether_addr(&dev->data->mac_addrs[i]))
627                         continue;
628                 memset(&dev->data->mac_addrs[i], 0,
629                        sizeof(struct rte_ether_addr));
630         }
631
632         while (nb_mc_addr--)
633                 axgbe_set_mac_addn_addr(pdata, (u8 *)mc_addr_set++, index++);
634
635         return 0;
636 }
637
638 static int
639 axgbe_dev_uc_hash_table_set(struct rte_eth_dev *dev,
640                             struct rte_ether_addr *mac_addr, uint8_t add)
641 {
642         struct axgbe_port *pdata = dev->data->dev_private;
643         struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
644
645         if (!hw_feat->hash_table_size) {
646                 PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
647                 return -ENOTSUP;
648         }
649
650         axgbe_set_mac_hash_table(pdata, (u8 *)mac_addr, add);
651
652         if (pdata->uc_hash_mac_addr > 0) {
653                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
654                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
655         } else {
656                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
657                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
658         }
659         return 0;
660 }
661
662 static int
663 axgbe_dev_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t add)
664 {
665         struct axgbe_port *pdata = dev->data->dev_private;
666         struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
667         uint32_t index;
668
669         if (!hw_feat->hash_table_size) {
670                 PMD_DRV_LOG(ERR, "MAC Hash Table not supported\n");
671                 return -ENOTSUP;
672         }
673
674         for (index = 0; index < pdata->hash_table_count; index++) {
675                 if (add)
676                         pdata->uc_hash_table[index] = ~0;
677                 else
678                         pdata->uc_hash_table[index] = 0;
679
680                 PMD_DRV_LOG(DEBUG, "%s MAC hash table at Index %#x\n",
681                             add ? "set" : "clear", index);
682
683                 AXGMAC_IOWRITE(pdata, MAC_HTR(index),
684                                pdata->uc_hash_table[index]);
685         }
686
687         if (add) {
688                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
689                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
690         } else {
691                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 0);
692                 AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
693         }
694         return 0;
695 }
696
697 /* return 0 means link status changed, -1 means not changed */
698 static int
699 axgbe_dev_link_update(struct rte_eth_dev *dev,
700                       int wait_to_complete __rte_unused)
701 {
702         struct axgbe_port *pdata = dev->data->dev_private;
703         struct rte_eth_link link;
704         int ret = 0;
705
706         PMD_INIT_FUNC_TRACE();
707         rte_delay_ms(800);
708
709         pdata->phy_if.phy_status(pdata);
710
711         memset(&link, 0, sizeof(struct rte_eth_link));
712         link.link_duplex = pdata->phy.duplex;
713         link.link_status = pdata->phy_link;
714         link.link_speed = pdata->phy_speed;
715         link.link_autoneg = !(dev->data->dev_conf.link_speeds &
716                               ETH_LINK_SPEED_FIXED);
717         ret = rte_eth_linkstatus_set(dev, &link);
718         if (ret == -1)
719                 PMD_DRV_LOG(ERR, "No change in link status\n");
720
721         return ret;
722 }
723
724 static int
725 axgbe_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
726 {
727         struct axgbe_port *pdata = dev->data->dev_private;
728
729         if (regs->data == NULL) {
730                 regs->length = axgbe_regs_get_count(pdata);
731                 regs->width = sizeof(uint32_t);
732                 return 0;
733         }
734
735         /* Only full register dump is supported */
736         if (regs->length &&
737             regs->length != (uint32_t)axgbe_regs_get_count(pdata))
738                 return -ENOTSUP;
739
740         regs->version = pdata->pci_dev->id.vendor_id << 16 |
741                         pdata->pci_dev->id.device_id;
742         axgbe_regs_dump(pdata, regs->data);
743         return 0;
744 }
/*
 * Accumulate the hardware MMC counters into the software copy kept in
 * pdata->mmc_stats.  Counters are frozen via MMC_CR.MCF for the
 * duration of the dump and unfrozen afterwards.  Each 64-bit statistic
 * is assembled from a LO/HI 32-bit register pair; single-register
 * counters are added directly.
 */
static void axgbe_read_mmc_stats(struct axgbe_port *pdata)
{
	struct axgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	/* Tx counters */
	stats->txoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
	stats->txoctetcount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32);

	stats->txframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
	stats->txframecount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_HI) << 32);

	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
	stats->txbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_HI) << 32);

	stats->txmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
	stats->txmulticastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_HI) << 32);

	stats->tx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
	stats->tx64octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_HI) << 32);

	stats->tx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
	stats->tx65to127octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_HI) << 32);

	stats->tx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
	stats->tx128to255octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_HI) << 32);

	stats->tx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
	stats->tx256to511octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_HI) << 32);

	stats->tx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
	stats->tx512to1023octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_HI) << 32);

	stats->tx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
	stats->tx1024tomaxoctets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_HI) << 32);

	stats->txunicastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
	stats->txunicastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_HI) << 32);

	stats->txmulticastframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
	stats->txmulticastframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_HI) << 32);

	/* NOTE(review): the _GB_ (good+bad) broadcast registers are folded
	 * into the _g (good) counter here; this matches the upstream Linux
	 * xgbe driver, but verify the intent against the hardware spec.
	 */
	stats->txbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
	stats->txbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_HI) << 32);

	stats->txunderflowerror +=
		AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
	stats->txunderflowerror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_HI) << 32);

	stats->txoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
	stats->txoctetcount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_HI) << 32);

	stats->txframecount_g +=
		AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
	stats->txframecount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_HI) << 32);

	stats->txpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
	stats->txpauseframes +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_HI) << 32);

	stats->txvlanframes_g +=
		AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
	stats->txvlanframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_HI) << 32);

	/* Rx counters */
	stats->rxframecount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
	stats->rxframecount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_HI) << 32);

	stats->rxoctetcount_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
	stats->rxoctetcount_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_HI) << 32);

	stats->rxoctetcount_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
	stats->rxoctetcount_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_HI) << 32);

	stats->rxbroadcastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
	stats->rxbroadcastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_HI) << 32);

	stats->rxmulticastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
	stats->rxmulticastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_HI) << 32);

	stats->rxcrcerror +=
		AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
	stats->rxcrcerror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXCRCERROR_HI) << 32);

	/* The following error counters are single 32-bit registers */
	stats->rxrunterror +=
		AXGMAC_IOREAD(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
		AXGMAC_IOREAD(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
		AXGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
	stats->rx64octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_HI) << 32);

	stats->rx65to127octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
	stats->rx65to127octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_HI) << 32);

	stats->rx128to255octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
	stats->rx128to255octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_HI) << 32);

	stats->rx256to511octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
	stats->rx256to511octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_HI) << 32);

	stats->rx512to1023octets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
	stats->rx512to1023octets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_HI) << 32);

	stats->rx1024tomaxoctets_gb +=
		AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
	stats->rx1024tomaxoctets_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_HI) << 32);

	stats->rxunicastframes_g +=
		AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
	stats->rxunicastframes_g +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_HI) << 32);

	stats->rxlengtherror +=
		AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
	stats->rxlengtherror +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_HI) << 32);

	stats->rxoutofrangetype +=
		AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
	stats->rxoutofrangetype +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_HI) << 32);

	stats->rxpauseframes +=
		AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
	stats->rxpauseframes +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_HI) << 32);

	stats->rxfifooverflow +=
		AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
	stats->rxfifooverflow +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_HI) << 32);

	stats->rxvlanframes_gb +=
		AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
	stats->rxvlanframes_gb +=
	((uint64_t)AXGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_HI) << 32);

	stats->rxwatchdogerror +=
		AXGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	AXGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}
952
953 static int
954 axgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
955                      unsigned int n)
956 {
957         struct axgbe_port *pdata = dev->data->dev_private;
958         unsigned int i;
959
960         if (!stats)
961                 return 0;
962
963         axgbe_read_mmc_stats(pdata);
964
965         for (i = 0; i < n && i < AXGBE_XSTATS_COUNT; i++) {
966                 stats[i].id = i;
967                 stats[i].value = *(u64 *)((uint8_t *)&pdata->mmc_stats +
968                                 axgbe_xstats_strings[i].offset);
969         }
970
971         return i;
972 }
973
974 static int
975 axgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
976                            struct rte_eth_xstat_name *xstats_names,
977                            unsigned int n)
978 {
979         unsigned int i;
980
981         if (n >= AXGBE_XSTATS_COUNT && xstats_names) {
982                 for (i = 0; i < AXGBE_XSTATS_COUNT; ++i) {
983                         snprintf(xstats_names[i].name,
984                                  RTE_ETH_XSTATS_NAME_SIZE, "%s",
985                                  axgbe_xstats_strings[i].name);
986                 }
987         }
988
989         return AXGBE_XSTATS_COUNT;
990 }
991
992 static int
993 axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
994                            uint64_t *values, unsigned int n)
995 {
996         unsigned int i;
997         uint64_t values_copy[AXGBE_XSTATS_COUNT];
998
999         if (!ids) {
1000                 struct axgbe_port *pdata = dev->data->dev_private;
1001
1002                 if (n < AXGBE_XSTATS_COUNT)
1003                         return AXGBE_XSTATS_COUNT;
1004
1005                 axgbe_read_mmc_stats(pdata);
1006
1007                 for (i = 0; i < AXGBE_XSTATS_COUNT; i++) {
1008                         values[i] = *(u64 *)((uint8_t *)&pdata->mmc_stats +
1009                                         axgbe_xstats_strings[i].offset);
1010                 }
1011
1012                 return i;
1013         }
1014
1015         axgbe_dev_xstats_get_by_id(dev, NULL, values_copy, AXGBE_XSTATS_COUNT);
1016
1017         for (i = 0; i < n; i++) {
1018                 if (ids[i] >= AXGBE_XSTATS_COUNT) {
1019                         PMD_DRV_LOG(ERR, "id value isn't valid\n");
1020                         return -1;
1021                 }
1022                 values[i] = values_copy[ids[i]];
1023         }
1024         return n;
1025 }
1026
1027 static int
1028 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
1029                                  struct rte_eth_xstat_name *xstats_names,
1030                                  const uint64_t *ids,
1031                                  unsigned int size)
1032 {
1033         struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
1034         unsigned int i;
1035
1036         if (!ids)
1037                 return axgbe_dev_xstats_get_names(dev, xstats_names, size);
1038
1039         axgbe_dev_xstats_get_names(dev, xstats_names_copy, size);
1040
1041         for (i = 0; i < size; i++) {
1042                 if (ids[i] >= AXGBE_XSTATS_COUNT) {
1043                         PMD_DRV_LOG(ERR, "id value isn't valid\n");
1044                         return -1;
1045                 }
1046                 strcpy(xstats_names[i].name, xstats_names_copy[ids[i]].name);
1047         }
1048         return size;
1049 }
1050
1051 static int
1052 axgbe_dev_xstats_reset(struct rte_eth_dev *dev)
1053 {
1054         struct axgbe_port *pdata = dev->data->dev_private;
1055         struct axgbe_mmc_stats *stats = &pdata->mmc_stats;
1056
1057         /* MMC registers are configured for reset on read */
1058         axgbe_read_mmc_stats(pdata);
1059
1060         /* Reset stats */
1061         memset(stats, 0, sizeof(*stats));
1062
1063         return 0;
1064 }
1065
1066 static int
1067 axgbe_dev_stats_get(struct rte_eth_dev *dev,
1068                     struct rte_eth_stats *stats)
1069 {
1070         struct axgbe_rx_queue *rxq;
1071         struct axgbe_tx_queue *txq;
1072         struct axgbe_port *pdata = dev->data->dev_private;
1073         struct axgbe_mmc_stats *mmc_stats = &pdata->mmc_stats;
1074         unsigned int i;
1075
1076         axgbe_read_mmc_stats(pdata);
1077
1078         stats->imissed = mmc_stats->rxfifooverflow;
1079
1080         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1081                 rxq = dev->data->rx_queues[i];
1082                 stats->q_ipackets[i] = rxq->pkts;
1083                 stats->ipackets += rxq->pkts;
1084                 stats->q_ibytes[i] = rxq->bytes;
1085                 stats->ibytes += rxq->bytes;
1086                 stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
1087                 stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
1088                 stats->ierrors += rxq->errors;
1089         }
1090
1091         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1092                 txq = dev->data->tx_queues[i];
1093                 stats->q_opackets[i] = txq->pkts;
1094                 stats->opackets += txq->pkts;
1095                 stats->q_obytes[i] = txq->bytes;
1096                 stats->obytes += txq->bytes;
1097                 stats->oerrors += txq->errors;
1098         }
1099
1100         return 0;
1101 }
1102
1103 static int
1104 axgbe_dev_stats_reset(struct rte_eth_dev *dev)
1105 {
1106         struct axgbe_rx_queue *rxq;
1107         struct axgbe_tx_queue *txq;
1108         unsigned int i;
1109
1110         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1111                 rxq = dev->data->rx_queues[i];
1112                 rxq->pkts = 0;
1113                 rxq->bytes = 0;
1114                 rxq->errors = 0;
1115                 rxq->rx_mbuf_alloc_failed = 0;
1116         }
1117         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1118                 txq = dev->data->tx_queues[i];
1119                 txq->pkts = 0;
1120                 txq->bytes = 0;
1121                 txq->errors = 0;
1122         }
1123
1124         return 0;
1125 }
1126
1127 static int
1128 axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1129 {
1130         struct axgbe_port *pdata = dev->data->dev_private;
1131
1132         dev_info->max_rx_queues = pdata->rx_ring_count;
1133         dev_info->max_tx_queues = pdata->tx_ring_count;
1134         dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
1135         dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
1136         dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
1137         dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
1138         dev_info->speed_capa =  ETH_LINK_SPEED_10G;
1139
1140         dev_info->rx_offload_capa =
1141                 DEV_RX_OFFLOAD_IPV4_CKSUM |
1142                 DEV_RX_OFFLOAD_UDP_CKSUM  |
1143                 DEV_RX_OFFLOAD_TCP_CKSUM  |
1144                 DEV_RX_OFFLOAD_JUMBO_FRAME      |
1145                 DEV_RX_OFFLOAD_SCATTER    |
1146                 DEV_RX_OFFLOAD_KEEP_CRC;
1147
1148         dev_info->tx_offload_capa =
1149                 DEV_TX_OFFLOAD_IPV4_CKSUM  |
1150                 DEV_TX_OFFLOAD_UDP_CKSUM   |
1151                 DEV_TX_OFFLOAD_TCP_CKSUM;
1152
1153         if (pdata->hw_feat.rss) {
1154                 dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
1155                 dev_info->reta_size = pdata->hw_feat.hash_table_size;
1156                 dev_info->hash_key_size =  AXGBE_RSS_HASH_KEY_SIZE;
1157         }
1158
1159         dev_info->rx_desc_lim = rx_desc_lim;
1160         dev_info->tx_desc_lim = tx_desc_lim;
1161
1162         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1163                 .rx_free_thresh = AXGBE_RX_FREE_THRESH,
1164         };
1165
1166         dev_info->default_txconf = (struct rte_eth_txconf) {
1167                 .tx_free_thresh = AXGBE_TX_FREE_THRESH,
1168         };
1169
1170         return 0;
1171 }
1172
1173 static int
1174 axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1175 {
1176         struct axgbe_port *pdata = dev->data->dev_private;
1177         struct xgbe_fc_info fc = pdata->fc;
1178         unsigned int reg, reg_val = 0;
1179
1180         reg = MAC_Q0TFCR;
1181         reg_val = AXGMAC_IOREAD(pdata, reg);
1182         fc.low_water[0] =  AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFA);
1183         fc.high_water[0] =  AXGMAC_MTL_IOREAD_BITS(pdata, 0, MTL_Q_RQFCR, RFD);
1184         fc.pause_time[0] = AXGMAC_GET_BITS(reg_val, MAC_Q0TFCR, PT);
1185         fc.autoneg = pdata->pause_autoneg;
1186
1187         if (pdata->rx_pause && pdata->tx_pause)
1188                 fc.mode = RTE_FC_FULL;
1189         else if (pdata->rx_pause)
1190                 fc.mode = RTE_FC_RX_PAUSE;
1191         else if (pdata->tx_pause)
1192                 fc.mode = RTE_FC_TX_PAUSE;
1193         else
1194                 fc.mode = RTE_FC_NONE;
1195
1196         fc_conf->high_water =  (1024 + (fc.low_water[0] << 9)) / 1024;
1197         fc_conf->low_water =  (1024 + (fc.high_water[0] << 9)) / 1024;
1198         fc_conf->pause_time = fc.pause_time[0];
1199         fc_conf->send_xon = fc.send_xon;
1200         fc_conf->mode = fc.mode;
1201
1202         return 0;
1203 }
1204
1205 static int
1206 axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1207 {
1208         struct axgbe_port *pdata = dev->data->dev_private;
1209         struct xgbe_fc_info fc = pdata->fc;
1210         unsigned int reg, reg_val = 0;
1211         reg = MAC_Q0TFCR;
1212
1213         pdata->pause_autoneg = fc_conf->autoneg;
1214         pdata->phy.pause_autoneg = pdata->pause_autoneg;
1215         fc.send_xon = fc_conf->send_xon;
1216         AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFA,
1217                         AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->high_water));
1218         AXGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_RQFCR, RFD,
1219                         AXGMAC_FLOW_CONTROL_VALUE(1024 * fc_conf->low_water));
1220         AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, fc_conf->pause_time);
1221         AXGMAC_IOWRITE(pdata, reg, reg_val);
1222         fc.mode = fc_conf->mode;
1223
1224         if (fc.mode == RTE_FC_FULL) {
1225                 pdata->tx_pause = 1;
1226                 pdata->rx_pause = 1;
1227         } else if (fc.mode == RTE_FC_RX_PAUSE) {
1228                 pdata->tx_pause = 0;
1229                 pdata->rx_pause = 1;
1230         } else if (fc.mode == RTE_FC_TX_PAUSE) {
1231                 pdata->tx_pause = 1;
1232                 pdata->rx_pause = 0;
1233         } else {
1234                 pdata->tx_pause = 0;
1235                 pdata->rx_pause = 0;
1236         }
1237
1238         if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
1239                 pdata->hw_if.config_tx_flow_control(pdata);
1240
1241         if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
1242                 pdata->hw_if.config_rx_flow_control(pdata);
1243
1244         pdata->hw_if.config_flow_control(pdata);
1245         pdata->phy.tx_pause = pdata->tx_pause;
1246         pdata->phy.rx_pause = pdata->rx_pause;
1247
1248         return 0;
1249 }
1250
1251 static int
1252 axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
1253                 struct rte_eth_pfc_conf *pfc_conf)
1254 {
1255         struct axgbe_port *pdata = dev->data->dev_private;
1256         struct xgbe_fc_info fc = pdata->fc;
1257         uint8_t tc_num;
1258
1259         tc_num = pdata->pfc_map[pfc_conf->priority];
1260
1261         if (pfc_conf->priority >= pdata->hw_feat.tc_cnt) {
1262                 PMD_INIT_LOG(ERR, "Max supported  traffic class: %d\n",
1263                                 pdata->hw_feat.tc_cnt);
1264         return -EINVAL;
1265         }
1266
1267         pdata->pause_autoneg = pfc_conf->fc.autoneg;
1268         pdata->phy.pause_autoneg = pdata->pause_autoneg;
1269         fc.send_xon = pfc_conf->fc.send_xon;
1270         AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFA,
1271                 AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.high_water));
1272         AXGMAC_MTL_IOWRITE_BITS(pdata, tc_num, MTL_Q_RQFCR, RFD,
1273                 AXGMAC_FLOW_CONTROL_VALUE(1024 * pfc_conf->fc.low_water));
1274
1275         switch (tc_num) {
1276         case 0:
1277                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1278                                 PSTC0, pfc_conf->fc.pause_time);
1279                 break;
1280         case 1:
1281                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1282                                 PSTC1, pfc_conf->fc.pause_time);
1283                 break;
1284         case 2:
1285                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1286                                 PSTC2, pfc_conf->fc.pause_time);
1287                 break;
1288         case 3:
1289                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM0R,
1290                                 PSTC3, pfc_conf->fc.pause_time);
1291                 break;
1292         case 4:
1293                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1294                                 PSTC4, pfc_conf->fc.pause_time);
1295                 break;
1296         case 5:
1297                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1298                                 PSTC5, pfc_conf->fc.pause_time);
1299                 break;
1300         case 7:
1301                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1302                                 PSTC6, pfc_conf->fc.pause_time);
1303                 break;
1304         case 6:
1305                 AXGMAC_IOWRITE_BITS(pdata, MTL_TCPM1R,
1306                                 PSTC7, pfc_conf->fc.pause_time);
1307                 break;
1308         }
1309
1310         fc.mode = pfc_conf->fc.mode;
1311
1312         if (fc.mode == RTE_FC_FULL) {
1313                 pdata->tx_pause = 1;
1314                 pdata->rx_pause = 1;
1315                 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
1316         } else if (fc.mode == RTE_FC_RX_PAUSE) {
1317                 pdata->tx_pause = 0;
1318                 pdata->rx_pause = 1;
1319                 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
1320         } else if (fc.mode == RTE_FC_TX_PAUSE) {
1321                 pdata->tx_pause = 1;
1322                 pdata->rx_pause = 0;
1323                 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
1324         } else {
1325                 pdata->tx_pause = 0;
1326                 pdata->rx_pause = 0;
1327                 AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
1328         }
1329
1330         if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause)
1331                 pdata->hw_if.config_tx_flow_control(pdata);
1332
1333         if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause)
1334                 pdata->hw_if.config_rx_flow_control(pdata);
1335         pdata->hw_if.config_flow_control(pdata);
1336         pdata->phy.tx_pause = pdata->tx_pause;
1337         pdata->phy.rx_pause = pdata->rx_pause;
1338
1339         return 0;
1340 }
1341
1342 void
1343 axgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1344         struct rte_eth_rxq_info *qinfo)
1345 {
1346         struct   axgbe_rx_queue *rxq;
1347
1348         rxq = dev->data->rx_queues[queue_id];
1349         qinfo->mp = rxq->mb_pool;
1350         qinfo->scattered_rx = dev->data->scattered_rx;
1351         qinfo->nb_desc = rxq->nb_desc;
1352         qinfo->conf.rx_free_thresh = rxq->free_thresh;
1353 }
1354
1355 void
1356 axgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1357         struct rte_eth_txq_info *qinfo)
1358 {
1359         struct  axgbe_tx_queue *txq;
1360
1361         txq = dev->data->tx_queues[queue_id];
1362         qinfo->nb_desc = txq->nb_desc;
1363         qinfo->conf.tx_free_thresh = txq->free_thresh;
1364 }
1365 const uint32_t *
1366 axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1367 {
1368         static const uint32_t ptypes[] = {
1369                 RTE_PTYPE_L2_ETHER,
1370                 RTE_PTYPE_L2_ETHER_TIMESYNC,
1371                 RTE_PTYPE_L2_ETHER_LLDP,
1372                 RTE_PTYPE_L2_ETHER_ARP,
1373                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1374                 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
1375                 RTE_PTYPE_L4_FRAG,
1376                 RTE_PTYPE_L4_ICMP,
1377                 RTE_PTYPE_L4_NONFRAG,
1378                 RTE_PTYPE_L4_SCTP,
1379                 RTE_PTYPE_L4_TCP,
1380                 RTE_PTYPE_L4_UDP,
1381                 RTE_PTYPE_TUNNEL_GRENAT,
1382                 RTE_PTYPE_TUNNEL_IP,
1383                 RTE_PTYPE_INNER_L2_ETHER,
1384                 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1385                 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
1386                 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
1387                 RTE_PTYPE_INNER_L4_FRAG,
1388                 RTE_PTYPE_INNER_L4_ICMP,
1389                 RTE_PTYPE_INNER_L4_NONFRAG,
1390                 RTE_PTYPE_INNER_L4_SCTP,
1391                 RTE_PTYPE_INNER_L4_TCP,
1392                 RTE_PTYPE_INNER_L4_UDP,
1393                 RTE_PTYPE_UNKNOWN
1394         };
1395
1396         if (dev->rx_pkt_burst == axgbe_recv_pkts)
1397                 return ptypes;
1398         return NULL;
1399 }
1400 static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1401 {
1402         struct rte_eth_dev_info dev_info;
1403         struct axgbe_port *pdata = dev->data->dev_private;
1404         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1405         unsigned int val = 0;
1406         axgbe_dev_info_get(dev, &dev_info);
1407         /* check that mtu is within the allowed range */
1408         if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1409                 return -EINVAL;
1410         /* mtu setting is forbidden if port is start */
1411         if (dev->data->dev_started) {
1412                 PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
1413                                 dev->data->port_id);
1414                 return -EBUSY;
1415         }
1416         if (frame_size > RTE_ETHER_MAX_LEN) {
1417                 dev->data->dev_conf.rxmode.offloads |=
1418                         DEV_RX_OFFLOAD_JUMBO_FRAME;
1419                 val = 1;
1420         } else {
1421                 dev->data->dev_conf.rxmode.offloads &=
1422                         ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1423                 val = 0;
1424         }
1425         AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
1426         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1427         return 0;
1428 }
/*
 * Read the three MAC hardware-feature registers and decode every field
 * into pdata->hw_feat.  Encoded fields (hash table size, DMA width,
 * FIFO sizes, zero-based counts) are translated to their actual values
 * at the end.
 */
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	/* Start from a clean slate so unread fields are zero */
	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					      ADDMACADRSEL);
	hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi     = AXGMAC_GET_BITS(mac_hfr1,
						 MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt        = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						  L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		/* No hash table present; keep the raw zero */
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}
1530
/*
 * Install the dev/phy/i2c operation tables on the port.  The
 * implementation-specific PHY hook runs last, after the generic PHY
 * ops are in place, so it can adjust entries in phy_if.
 */
static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	/* NOTE(review): vdata presumably carries device-version-specific
	 * data selecting the PHY implementation — confirm in axgbe_ethdev.h.
	 */
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}
1538
/*
 * Derive the Tx/Rx ring and queue counts from the probed hardware
 * features, capped by any caller-provided maximums.  Must run after
 * the function-pointer tables are installed, since probing uses them.
 */
static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				     pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				     pdata->tx_max_q_count);

	/* Tx queues track the Tx rings (1-to-1 mapping, see above) */
	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				     pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				  pdata->rx_max_q_count);
}
1578
1579 static void axgbe_default_config(struct axgbe_port *pdata)
1580 {
1581         pdata->pblx8 = DMA_PBL_X8_ENABLE;
1582         pdata->tx_sf_mode = MTL_TSF_ENABLE;
1583         pdata->tx_threshold = MTL_TX_THRESHOLD_64;
1584         pdata->tx_pbl = DMA_PBL_32;
1585         pdata->tx_osp_mode = DMA_OSP_ENABLE;
1586         pdata->rx_sf_mode = MTL_RSF_ENABLE;
1587         pdata->rx_threshold = MTL_RX_THRESHOLD_64;
1588         pdata->rx_pbl = DMA_PBL_32;
1589         pdata->pause_autoneg = 1;
1590         pdata->tx_pause = 0;
1591         pdata->rx_pause = 0;
1592         pdata->phy_speed = SPEED_UNKNOWN;
1593         pdata->power_down = 0;
1594 }
1595
1596 static int
1597 pci_device_cmp(const struct rte_device *dev, const void *_pci_id)
1598 {
1599         const struct rte_pci_device *pdev = RTE_DEV_TO_PCI_CONST(dev);
1600         const struct rte_pci_id *pcid = _pci_id;
1601
1602         if (pdev->id.vendor_id == AMD_PCI_VENDOR_ID &&
1603                         pdev->id.device_id == pcid->device_id)
1604                 return 0;
1605         return 1;
1606 }
1607
1608 static bool
1609 pci_search_device(int device_id)
1610 {
1611         struct rte_bus *pci_bus;
1612         struct rte_pci_id dev_id;
1613
1614         dev_id.device_id = device_id;
1615         pci_bus = rte_bus_find_by_name("pci");
1616         return (pci_bus != NULL) &&
1617                 (pci_bus->find_device(NULL, pci_device_cmp, &dev_id) != NULL);
1618 }
1619
1620 /*
1621  * It returns 0 on success.
1622  */
1623 static int
1624 eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
1625 {
1626         PMD_INIT_FUNC_TRACE();
1627         struct axgbe_port *pdata;
1628         struct rte_pci_device *pci_dev;
1629         uint32_t reg, mac_lo, mac_hi;
1630         uint32_t len;
1631         int ret;
1632
1633         eth_dev->dev_ops = &axgbe_eth_dev_ops;
1634
1635         /*
1636          * For secondary processes, we don't initialise any further as primary
1637          * has already done this work.
1638          */
1639         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1640                 return 0;
1641
1642         pdata = eth_dev->data->dev_private;
1643         /* initial state */
1644         rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
1645         rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
1646         pdata->eth_dev = eth_dev;
1647
1648         pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1649         pdata->pci_dev = pci_dev;
1650
1651         /*
1652          * Use root complex device ID to differentiate RV AXGBE vs SNOWY AXGBE
1653          */
1654         if (pci_search_device(AMD_PCI_RV_ROOT_COMPLEX_ID)) {
1655                 pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
1656                 pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
1657         } else {
1658                 pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
1659                 pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
1660         }
1661
1662         pdata->xgmac_regs =
1663                 (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
1664         pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
1665                                      + AXGBE_MAC_PROP_OFFSET);
1666         pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
1667                                     + AXGBE_I2C_CTRL_OFFSET);
1668         pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
1669
1670         /* version specific driver data*/
1671         if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
1672                 pdata->vdata = &axgbe_v2a;
1673         else
1674                 pdata->vdata = &axgbe_v2b;
1675
1676         /* Configure the PCS indirect addressing support */
1677         reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
1678         pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
1679         pdata->xpcs_window <<= 6;
1680         pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
1681         pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
1682         pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
1683
1684         PMD_INIT_LOG(DEBUG,
1685                      "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
1686                      pdata->xpcs_window_size, pdata->xpcs_window_mask);
1687         XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
1688
1689         /* Retrieve the MAC address */
1690         mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
1691         mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
1692         pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
1693         pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
1694         pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
1695         pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
1696         pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
1697         pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8)  &  0xff;
1698
1699         len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_MAC_ADDRS;
1700         eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr", len, 0);
1701
1702         if (!eth_dev->data->mac_addrs) {
1703                 PMD_INIT_LOG(ERR,
1704                              "Failed to alloc %u bytes needed to "
1705                              "store MAC addresses", len);
1706                 return -ENOMEM;
1707         }
1708
1709         /* Allocate memory for storing hash filter MAC addresses */
1710         len = RTE_ETHER_ADDR_LEN * AXGBE_MAX_HASH_MAC_ADDRS;
1711         eth_dev->data->hash_mac_addrs = rte_zmalloc("axgbe_hash_mac_addr",
1712                                                     len, 0);
1713
1714         if (eth_dev->data->hash_mac_addrs == NULL) {
1715                 PMD_INIT_LOG(ERR,
1716                              "Failed to allocate %d bytes needed to "
1717                              "store MAC addresses", len);
1718                 return -ENOMEM;
1719         }
1720
1721         if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
1722                 rte_eth_random_addr(pdata->mac_addr.addr_bytes);
1723
1724         /* Copy the permanent MAC address */
1725         rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
1726
1727         /* Clock settings */
1728         pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
1729         pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
1730
1731         /* Set the DMA coherency values */
1732         pdata->coherent = 1;
1733         pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
1734         pdata->arcache = AXGBE_DMA_OS_ARCACHE;
1735         pdata->awcache = AXGBE_DMA_OS_AWCACHE;
1736
1737         /* Set the maximum channels and queues */
1738         reg = XP_IOREAD(pdata, XP_PROP_1);
1739         pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
1740         pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
1741         pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
1742         pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
1743
1744         /* Set the hardware channel and queue counts */
1745         axgbe_set_counts(pdata);
1746
1747         /* Set the maximum fifo amounts */
1748         reg = XP_IOREAD(pdata, XP_PROP_2);
1749         pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
1750         pdata->tx_max_fifo_size *= 16384;
1751         pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
1752                                           pdata->vdata->tx_max_fifo_size);
1753         pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
1754         pdata->rx_max_fifo_size *= 16384;
1755         pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
1756                                           pdata->vdata->rx_max_fifo_size);
1757         /* Issue software reset to DMA */
1758         ret = pdata->hw_if.exit(pdata);
1759         if (ret)
1760                 PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");
1761
1762         /* Set default configuration data */
1763         axgbe_default_config(pdata);
1764
1765         /* Set default max values if not provided */
1766         if (!pdata->tx_max_fifo_size)
1767                 pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
1768         if (!pdata->rx_max_fifo_size)
1769                 pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
1770
1771         pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
1772         pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
1773         pthread_mutex_init(&pdata->xpcs_mutex, NULL);
1774         pthread_mutex_init(&pdata->i2c_mutex, NULL);
1775         pthread_mutex_init(&pdata->an_mutex, NULL);
1776         pthread_mutex_init(&pdata->phy_mutex, NULL);
1777
1778         ret = pdata->phy_if.phy_init(pdata);
1779         if (ret) {
1780                 rte_free(eth_dev->data->mac_addrs);
1781                 eth_dev->data->mac_addrs = NULL;
1782                 return ret;
1783         }
1784
1785         rte_intr_callback_register(&pci_dev->intr_handle,
1786                                    axgbe_dev_interrupt_handler,
1787                                    (void *)eth_dev);
1788         PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
1789                      eth_dev->data->port_id, pci_dev->id.vendor_id,
1790                      pci_dev->id.device_id);
1791
1792         return 0;
1793 }
1794
1795 static int
1796 eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
1797 {
1798         struct rte_pci_device *pci_dev;
1799
1800         PMD_INIT_FUNC_TRACE();
1801
1802         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1803                 return 0;
1804
1805         pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
1806         eth_dev->dev_ops = NULL;
1807         eth_dev->rx_pkt_burst = NULL;
1808         eth_dev->tx_pkt_burst = NULL;
1809         axgbe_dev_clear_queues(eth_dev);
1810
1811         /* disable uio intr before callback unregister */
1812         rte_intr_disable(&pci_dev->intr_handle);
1813         rte_intr_callback_unregister(&pci_dev->intr_handle,
1814                                      axgbe_dev_interrupt_handler,
1815                                      (void *)eth_dev);
1816
1817         return 0;
1818 }
1819
1820 static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1821         struct rte_pci_device *pci_dev)
1822 {
1823         return rte_eth_dev_pci_generic_probe(pci_dev,
1824                 sizeof(struct axgbe_port), eth_axgbe_dev_init);
1825 }
1826
1827 static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
1828 {
1829         return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
1830 }
1831
/* PCI driver descriptor: binds the axgbe PCI ID table to the probe/remove
 * hooks; NEED_MAPPING asks EAL to map the device BARs before probe.
 */
static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};
1838
/* Register the PMD with the PCI bus, export its ID table for tooling,
 * declare the kernel modules it can bind against, and create the two
 * dynamic logtypes (init and driver), both defaulting to NOTICE level.
 */
RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_LOG_REGISTER(axgbe_logtype_init, pmd.net.axgbe.init, NOTICE);
RTE_LOG_REGISTER(axgbe_logtype_driver, pmd.net.axgbe.driver, NOTICE);