drivers/net/atlantic/atl_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_alarm.h>
8
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
12 #include "atl_logs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
16
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
19
20 static int  atl_dev_configure(struct rte_eth_dev *dev);
21 static int  atl_dev_start(struct rte_eth_dev *dev);
22 static void atl_dev_stop(struct rte_eth_dev *dev);
23 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
24 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
25 static void atl_dev_close(struct rte_eth_dev *dev);
26 static int  atl_dev_reset(struct rte_eth_dev *dev);
27 static int  atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
28 static int  atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
29 static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
30 static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
31 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
32
33 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
34                                     struct rte_eth_xstat_name *xstats_names,
35                                     unsigned int size);
36
37 static int atl_dev_stats_get(struct rte_eth_dev *dev,
38                                 struct rte_eth_stats *stats);
39
40 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
41                               struct rte_eth_xstat *stats, unsigned int n);
42
43 static int atl_dev_stats_reset(struct rte_eth_dev *dev);
44
45 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46                               size_t fw_size);
47
48 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
49
50 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
51
52 /* VLAN stuff */
53 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
54                 uint16_t vlan_id, int on);
55
56 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
57
58 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
59                                      uint16_t queue_id, int on);
60
61 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
62                              enum rte_vlan_type vlan_type, uint16_t tpid);
63
64 /* EEPROM */
65 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
66 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
67                               struct rte_dev_eeprom_info *eeprom);
68 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
69                               struct rte_dev_eeprom_info *eeprom);
70
71 /* Regs */
72 static int atl_dev_get_regs(struct rte_eth_dev *dev,
73                             struct rte_dev_reg_info *regs);
74
75 /* Flow control */
76 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
77                                struct rte_eth_fc_conf *fc_conf);
78 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
79                                struct rte_eth_fc_conf *fc_conf);
80
81 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
82
83 /* Interrupts */
84 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
85 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
86 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
87 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
88                                     struct rte_intr_handle *handle);
89 static void atl_dev_interrupt_handler(void *param);
90
91
92 static int atl_add_mac_addr(struct rte_eth_dev *dev,
93                             struct rte_ether_addr *mac_addr,
94                             uint32_t index, uint32_t pool);
95 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
96 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
97                                            struct rte_ether_addr *mac_addr);
98
99 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
100                                     struct rte_ether_addr *mc_addr_set,
101                                     uint32_t nb_mc_addr);
102
103 /* RSS */
104 static int atl_reta_update(struct rte_eth_dev *dev,
105                              struct rte_eth_rss_reta_entry64 *reta_conf,
106                              uint16_t reta_size);
107 static int atl_reta_query(struct rte_eth_dev *dev,
108                             struct rte_eth_rss_reta_entry64 *reta_conf,
109                             uint16_t reta_size);
110 static int atl_rss_hash_update(struct rte_eth_dev *dev,
111                                  struct rte_eth_rss_conf *rss_conf);
112 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
113                                    struct rte_eth_rss_conf *rss_conf);
114
115
116 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
117         struct rte_pci_device *pci_dev);
118 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
119
120 static int atl_dev_info_get(struct rte_eth_dev *dev,
121                                 struct rte_eth_dev_info *dev_info);
122
123 /*
124  * The set of PCI devices this driver supports
125  */
126 static const struct rte_pci_id pci_id_atl_map[] = {
127         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
128         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
129         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
130         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
131         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
132
133         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
134         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
135         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
136         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
137         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
138         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
139
140         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
141         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
142         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
143         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
144         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
145         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
146
147         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
148         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
149         { .vendor_id = 0, /* sentinel */ },
150 };
151
152 static struct rte_pci_driver rte_atl_pmd = {
153         .id_table = pci_id_atl_map,
154         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
155         .probe = eth_atl_pci_probe,
156         .remove = eth_atl_pci_remove,
157 };
158
159 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
160                         | DEV_RX_OFFLOAD_IPV4_CKSUM \
161                         | DEV_RX_OFFLOAD_UDP_CKSUM \
162                         | DEV_RX_OFFLOAD_TCP_CKSUM \
163                         | DEV_RX_OFFLOAD_JUMBO_FRAME \
164                         | DEV_RX_OFFLOAD_MACSEC_STRIP \
165                         | DEV_RX_OFFLOAD_VLAN_FILTER)
166
167 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
168                         | DEV_TX_OFFLOAD_IPV4_CKSUM \
169                         | DEV_TX_OFFLOAD_UDP_CKSUM \
170                         | DEV_TX_OFFLOAD_TCP_CKSUM \
171                         | DEV_TX_OFFLOAD_TCP_TSO \
172                         | DEV_TX_OFFLOAD_MACSEC_INSERT \
173                         | DEV_TX_OFFLOAD_MULTI_SEGS)
174
175 #define SFP_EEPROM_SIZE 0x100
176
177 static const struct rte_eth_desc_lim rx_desc_lim = {
178         .nb_max = ATL_MAX_RING_DESC,
179         .nb_min = ATL_MIN_RING_DESC,
180         .nb_align = ATL_RXD_ALIGN,
181 };
182
183 static const struct rte_eth_desc_lim tx_desc_lim = {
184         .nb_max = ATL_MAX_RING_DESC,
185         .nb_min = ATL_MIN_RING_DESC,
186         .nb_align = ATL_TXD_ALIGN,
187         .nb_seg_max = ATL_TX_MAX_SEG,
188         .nb_mtu_seg_max = ATL_TX_MAX_SEG,
189 };
190
191 enum atl_xstats_type {
192         XSTATS_TYPE_MSM = 0,
193         XSTATS_TYPE_MACSEC,
194 };
195
196 #define ATL_XSTATS_FIELD(name) { \
197         #name, \
198         offsetof(struct aq_stats_s, name), \
199         XSTATS_TYPE_MSM \
200 }
201
202 #define ATL_MACSEC_XSTATS_FIELD(name) { \
203         #name, \
204         offsetof(struct macsec_stats, name), \
205         XSTATS_TYPE_MACSEC \
206 }
207
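/*
 * Each entry in atl_xstats_tbl records a counter name, its byte offset
 * inside the corresponding statistics structure and which structure it
 * lives in, so xstats can be read generically through offsetof() lookups.
 */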
208 struct atl_xstats_tbl_s {
209         const char *name;
210         unsigned int offset;
211         enum atl_xstats_type type;
212 };
213
214 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
215         ATL_XSTATS_FIELD(uprc),
216         ATL_XSTATS_FIELD(mprc),
217         ATL_XSTATS_FIELD(bprc),
218         ATL_XSTATS_FIELD(erpt),
219         ATL_XSTATS_FIELD(uptc),
220         ATL_XSTATS_FIELD(mptc),
221         ATL_XSTATS_FIELD(bptc),
222         ATL_XSTATS_FIELD(erpr),
223         ATL_XSTATS_FIELD(ubrc),
224         ATL_XSTATS_FIELD(ubtc),
225         ATL_XSTATS_FIELD(mbrc),
226         ATL_XSTATS_FIELD(mbtc),
227         ATL_XSTATS_FIELD(bbrc),
228         ATL_XSTATS_FIELD(bbtc),
229         /* Ingress Common Counters */
230         ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
231         ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
232         ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
233         ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
234         ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
235         ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
236         ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
237         ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
238         /* Ingress SA Counters */
239         ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
240         ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
241         ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
242         ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
243         ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
244         ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
245         ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
246         ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
247         ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
248         /* Egress Common Counters */
249         ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
250         ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
251         ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
252         ATL_MACSEC_XSTATS_FIELD(out_too_long),
253         /* Egress SC Counters */
254         ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
255         ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
256         /* Egress SA Counters */
257         ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
258         ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
259         ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
260         ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
261 };
262
263 static const struct eth_dev_ops atl_eth_dev_ops = {
264         .dev_configure        = atl_dev_configure,
265         .dev_start            = atl_dev_start,
266         .dev_stop             = atl_dev_stop,
267         .dev_set_link_up      = atl_dev_set_link_up,
268         .dev_set_link_down    = atl_dev_set_link_down,
269         .dev_close            = atl_dev_close,
270         .dev_reset            = atl_dev_reset,
271
272         /* PROMISC */
273         .promiscuous_enable   = atl_dev_promiscuous_enable,
274         .promiscuous_disable  = atl_dev_promiscuous_disable,
275         .allmulticast_enable  = atl_dev_allmulticast_enable,
276         .allmulticast_disable = atl_dev_allmulticast_disable,
277
278         /* Link */
279         .link_update          = atl_dev_link_update,
280
281         .get_reg              = atl_dev_get_regs,
282
283         /* Stats */
284         .stats_get            = atl_dev_stats_get,
285         .xstats_get           = atl_dev_xstats_get,
286         .xstats_get_names     = atl_dev_xstats_get_names,
287         .stats_reset          = atl_dev_stats_reset,
288         .xstats_reset         = atl_dev_stats_reset,
289
290         .fw_version_get       = atl_fw_version_get,
291         .dev_infos_get        = atl_dev_info_get,
292         .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
293
294         .mtu_set              = atl_dev_mtu_set,
295
296         /* VLAN */
297         .vlan_filter_set      = atl_vlan_filter_set,
298         .vlan_offload_set     = atl_vlan_offload_set,
299         .vlan_tpid_set        = atl_vlan_tpid_set,
300         .vlan_strip_queue_set = atl_vlan_strip_queue_set,
301
302         /* Queue Control */
303         .rx_queue_start       = atl_rx_queue_start,
304         .rx_queue_stop        = atl_rx_queue_stop,
305         .rx_queue_setup       = atl_rx_queue_setup,
306         .rx_queue_release     = atl_rx_queue_release,
307
308         .tx_queue_start       = atl_tx_queue_start,
309         .tx_queue_stop        = atl_tx_queue_stop,
310         .tx_queue_setup       = atl_tx_queue_setup,
311         .tx_queue_release     = atl_tx_queue_release,
312
313         .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
314         .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
315
316         /* EEPROM */
317         .get_eeprom_length    = atl_dev_get_eeprom_length,
318         .get_eeprom           = atl_dev_get_eeprom,
319         .set_eeprom           = atl_dev_set_eeprom,
320
321         /* Flow Control */
322         .flow_ctrl_get        = atl_flow_ctrl_get,
323         .flow_ctrl_set        = atl_flow_ctrl_set,
324
325         /* MAC */
326         .mac_addr_add         = atl_add_mac_addr,
327         .mac_addr_remove      = atl_remove_mac_addr,
328         .mac_addr_set         = atl_set_default_mac_addr,
329         .set_mc_addr_list     = atl_dev_set_mc_addr_list,
330         .rxq_info_get         = atl_rxq_info_get,
331         .txq_info_get         = atl_txq_info_get,
332
333         .reta_update          = atl_reta_update,
334         .reta_query           = atl_reta_query,
335         .rss_hash_update      = atl_rss_hash_update,
336         .rss_hash_conf_get    = atl_rss_hash_conf_get,
337 };
338
339 static inline int32_t
340 atl_reset_hw(struct aq_hw_s *hw)
341 {
342         return hw_atl_b0_hw_reset(hw);
343 }
344
345 static inline void
346 atl_enable_intr(struct rte_eth_dev *dev)
347 {
348         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
349
350         hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
351 }
352
353 static void
354 atl_disable_intr(struct aq_hw_s *hw)
355 {
356         PMD_INIT_FUNC_TRACE();
357         hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
358 }
359
360 static int
361 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
362 {
363         struct atl_adapter *adapter = eth_dev->data->dev_private;
364         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
365         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
366         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
367         int err = 0;
368
369         PMD_INIT_FUNC_TRACE();
370
371         eth_dev->dev_ops = &atl_eth_dev_ops;
372
373         eth_dev->rx_queue_count       = atl_rx_queue_count;
374         eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
375         eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;
376
377         eth_dev->rx_pkt_burst = &atl_recv_pkts;
378         eth_dev->tx_pkt_burst = &atl_xmit_pkts;
379         eth_dev->tx_pkt_prepare = &atl_prep_pkts;
380
381         /* For secondary processes, the primary process has done all the work */
382         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
383                 return 0;
384
385         /* Vendor and Device ID need to be set before init of shared code */
386         hw->device_id = pci_dev->id.device_id;
387         hw->vendor_id = pci_dev->id.vendor_id;
388         hw->mmio = (void *)pci_dev->mem_resource[0].addr;
389
390         /* Hardware configuration - hardcoded defaults */
391         adapter->hw_cfg.is_lro = false;
392         adapter->hw_cfg.wol = false;
393         adapter->hw_cfg.is_rss = false;
394         adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
395
396         adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
397                           AQ_NIC_RATE_5G |
398                           AQ_NIC_RATE_2G5 |
399                           AQ_NIC_RATE_1G |
400                           AQ_NIC_RATE_100M;
401
402         adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
403         adapter->hw_cfg.aq_rss.indirection_table_size =
404                 HW_ATL_B0_RSS_REDIRECTION_MAX;
405
406         hw->aq_nic_cfg = &adapter->hw_cfg;
407
408         pthread_mutex_init(&hw->mbox_mutex, NULL);
409
410         /* disable interrupt */
411         atl_disable_intr(hw);
412
413         /* Allocate memory for storing MAC addresses */
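        /* Only the permanent MAC address (read from firmware below) is stored here */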
414         eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
415                                         RTE_ETHER_ADDR_LEN, 0);
416         if (eth_dev->data->mac_addrs == NULL) {
417                 PMD_INIT_LOG(ERR, "MAC Malloc failed");
418                 return -ENOMEM;
419         }
420
421         err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
422         if (err)
423                 return err;
424
425         /* Copy the permanent MAC address */
426         if (hw->aq_fw_ops->get_mac_permanent(hw,
427                         eth_dev->data->mac_addrs->addr_bytes) != 0)
428                 return -EINVAL;
429
430         /* Reset the hw statistics */
431         atl_dev_stats_reset(eth_dev);
432
433         rte_intr_callback_register(intr_handle,
434                                    atl_dev_interrupt_handler, eth_dev);
435
436         /* enable uio/vfio intr/eventfd mapping */
437         rte_intr_enable(intr_handle);
438
439         /* enable supported interrupts */
440         atl_enable_intr(eth_dev);
441
442         return err;
443 }
444
445 static int
446 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
447 {
448         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
449         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
450         struct aq_hw_s *hw;
451
452         PMD_INIT_FUNC_TRACE();
453
454         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
455                 return -EPERM;
456
457         hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
458
459         if (hw->adapter_stopped == 0)
460                 atl_dev_close(eth_dev);
461
462         eth_dev->dev_ops = NULL;
463         eth_dev->rx_pkt_burst = NULL;
464         eth_dev->tx_pkt_burst = NULL;
465
466         /* disable uio intr before callback unregister */
467         rte_intr_disable(intr_handle);
468         rte_intr_callback_unregister(intr_handle,
469                                      atl_dev_interrupt_handler, eth_dev);
470
471         rte_free(eth_dev->data->mac_addrs);
472         eth_dev->data->mac_addrs = NULL;
473
474         pthread_mutex_destroy(&hw->mbox_mutex);
475
476         return 0;
477 }
478
479 static int
480 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
481         struct rte_pci_device *pci_dev)
482 {
483         return rte_eth_dev_pci_generic_probe(pci_dev,
484                 sizeof(struct atl_adapter), eth_atl_dev_init);
485 }
486
487 static int
488 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
489 {
490         return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
491 }
492
493 static int
494 atl_dev_configure(struct rte_eth_dev *dev)
495 {
496         struct atl_interrupt *intr =
497                 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
498
499         PMD_INIT_FUNC_TRACE();
500
501         /* set flag to update link status after init */
502         intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
503
504         return 0;
505 }
506
507 /*
508  * Configure device link speed and setup link.
509  * It returns 0 on success.
510  */
511 static int
512 atl_dev_start(struct rte_eth_dev *dev)
513 {
514         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
515         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
516         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
517         uint32_t intr_vector = 0;
518         int status;
519         int err;
520
521         PMD_INIT_FUNC_TRACE();
522
523         /* set adapter started */
524         hw->adapter_stopped = 0;
525
526         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
527                 PMD_INIT_LOG(ERR,
528                 "Invalid link_speeds for port %u, fixed speed not supported",
529                                 dev->data->port_id);
530                 return -EINVAL;
531         }
532
533         /* disable uio/vfio intr/eventfd mapping */
534         rte_intr_disable(intr_handle);
535
536         /* reinitialize adapter
537          * this calls reset and start
538          */
539         status = atl_reset_hw(hw);
540         if (status != 0)
541                 return -EIO;
542
543         err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
544
545         hw_atl_b0_hw_start(hw);
546         /* check and configure queue intr-vector mapping */
547         if ((rte_intr_cap_multiple(intr_handle) ||
548             !RTE_ETH_DEV_SRIOV(dev).active) &&
549             dev->data->dev_conf.intr_conf.rxq != 0) {
550                 intr_vector = dev->data->nb_rx_queues;
551                 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
552                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
553                                         ATL_MAX_INTR_QUEUE_NUM);
554                         return -ENOTSUP;
555                 }
556                 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
557                         PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
558                         return -1;
559                 }
560         }
561
562         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
563                 intr_handle->intr_vec = rte_zmalloc("intr_vec",
564                                     dev->data->nb_rx_queues * sizeof(int), 0);
565                 if (intr_handle->intr_vec == NULL) {
566                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
567                                      " intr_vec", dev->data->nb_rx_queues);
568                         return -ENOMEM;
569                 }
570         }
571
572         /* initialize transmission unit */
573         atl_tx_init(dev);
574
575         /* This can fail when allocating mbufs for descriptor rings */
576         err = atl_rx_init(dev);
577         if (err) {
578                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
579                 goto error;
580         }
581
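        /* fw_ver_actual packs the version as 8-bit major, 8-bit minor, 16-bit build */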
582         PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
583                 hw->fw_ver_actual >> 24,
584                 (hw->fw_ver_actual >> 16) & 0xFF,
585                 hw->fw_ver_actual & 0xFFFF);
586         PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
587
588         err = atl_start_queues(dev);
589         if (err < 0) {
590                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
591                 goto error;
592         }
593
594         err = atl_dev_set_link_up(dev);
595
596         err = hw->aq_fw_ops->update_link_status(hw);
597
598         if (err)
599                 goto error;
600
601         dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
602
603         if (rte_intr_allow_others(intr_handle)) {
604                 /* check if lsc interrupt is enabled */
605                 if (dev->data->dev_conf.intr_conf.lsc != 0)
606                         atl_dev_lsc_interrupt_setup(dev, true);
607                 else
608                         atl_dev_lsc_interrupt_setup(dev, false);
609         } else {
610                 rte_intr_callback_unregister(intr_handle,
611                                              atl_dev_interrupt_handler, dev);
612                 if (dev->data->dev_conf.intr_conf.lsc != 0)
613                         PMD_INIT_LOG(INFO, "lsc won't enable because of"
614                                      " no intr multiplex");
615         }
616
617         /* check if rxq interrupt is enabled */
618         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
619             rte_intr_dp_is_en(intr_handle))
620                 atl_dev_rxq_interrupt_setup(dev);
621
622         /* enable uio/vfio intr/eventfd mapping */
623         rte_intr_enable(intr_handle);
624
625         /* re-enable interrupts disabled by the hw reset */
626         atl_enable_intr(dev);
627
628         return 0;
629
630 error:
631         atl_stop_queues(dev);
632         return -EIO;
633 }
634
635 /*
636  * Stop device: disable rx and tx functions to allow for reconfiguring.
637  */
638 static void
639 atl_dev_stop(struct rte_eth_dev *dev)
640 {
641         struct rte_eth_link link;
642         struct aq_hw_s *hw =
643                 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
644         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
645         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
646
647         PMD_INIT_FUNC_TRACE();
648
649         /* disable interrupts */
650         atl_disable_intr(hw);
651
652         /* reset the NIC */
653         atl_reset_hw(hw);
654         hw->adapter_stopped = 1;
655
656         atl_stop_queues(dev);
657
658         /* Clear stored conf */
659         dev->data->scattered_rx = 0;
660         dev->data->lro = 0;
661
662         /* Clear recorded link status */
663         memset(&link, 0, sizeof(link));
664         rte_eth_linkstatus_set(dev, &link);
665
666         if (!rte_intr_allow_others(intr_handle))
667                 /* resume to the default handler */
668                 rte_intr_callback_register(intr_handle,
669                                            atl_dev_interrupt_handler,
670                                            (void *)dev);
671
672         /* Clean datapath event and queue/vec mapping */
673         rte_intr_efd_disable(intr_handle);
674         if (intr_handle->intr_vec != NULL) {
675                 rte_free(intr_handle->intr_vec);
676                 intr_handle->intr_vec = NULL;
677         }
678 }
679
680 /*
681  * Set device link up: enable tx.
682  */
683 static int
684 atl_dev_set_link_up(struct rte_eth_dev *dev)
685 {
686         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
687         uint32_t link_speeds = dev->data->dev_conf.link_speeds;
688         uint32_t speed_mask = 0;
689
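
        /*
         * Translate the requested ETH_LINK_SPEED_* bits into the firmware
         * AQ_NIC_RATE_* mask; autonegotiation advertises every rate enabled
         * in the adapter configuration.
         */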
690         if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
691                 speed_mask = hw->aq_nic_cfg->link_speed_msk;
692         } else {
693                 if (link_speeds & ETH_LINK_SPEED_10G)
694                         speed_mask |= AQ_NIC_RATE_10G;
695                 if (link_speeds & ETH_LINK_SPEED_5G)
696                         speed_mask |= AQ_NIC_RATE_5G;
697                 if (link_speeds & ETH_LINK_SPEED_1G)
698                         speed_mask |= AQ_NIC_RATE_1G;
699                 if (link_speeds & ETH_LINK_SPEED_2_5G)
700                         speed_mask |=  AQ_NIC_RATE_2G5;
701                 if (link_speeds & ETH_LINK_SPEED_100M)
702                         speed_mask |= AQ_NIC_RATE_100M;
703         }
704
705         return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
706 }
707
708 /*
709  * Set device link down: disable tx.
710  */
711 static int
712 atl_dev_set_link_down(struct rte_eth_dev *dev)
713 {
714         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
715
716         return hw->aq_fw_ops->set_link_speed(hw, 0);
717 }
718
719 /*
720  * Reset and stop device.
721  */
722 static void
723 atl_dev_close(struct rte_eth_dev *dev)
724 {
725         PMD_INIT_FUNC_TRACE();
726
727         atl_dev_stop(dev);
728
729         atl_free_queues(dev);
730 }
731
732 static int
733 atl_dev_reset(struct rte_eth_dev *dev)
734 {
735         int ret;
736
737         ret = eth_atl_dev_uninit(dev);
738         if (ret)
739                 return ret;
740
741         ret = eth_atl_dev_init(dev);
742
743         return ret;
744 }
745
746 static int
747 atl_dev_configure_macsec(struct rte_eth_dev *dev)
748 {
749         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
750         struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
751         struct aq_macsec_config *aqcfg = &cf->aq_macsec;
752         struct macsec_msg_fw_request msg_macsec;
753         struct macsec_msg_fw_response response;
754
755         if (!aqcfg->common.macsec_enabled ||
756             hw->aq_fw_ops->send_macsec_req == NULL)
757                 return 0;
758
759         memset(&msg_macsec, 0, sizeof(msg_macsec));
760
761         /* Create the SC/SA structures from the parameters provided by DPDK */
762
763         /* Configure macsec */
764         msg_macsec.msg_type = macsec_cfg_msg;
765         msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
766         msg_macsec.cfg.interrupts_enabled = 1;
767
768         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
769
770         if (response.result)
771                 return -1;
772
773         memset(&msg_macsec, 0, sizeof(msg_macsec));
774
775         /* Configure TX SC */
776
777         msg_macsec.msg_type = macsec_add_tx_sc_msg;
778         msg_macsec.txsc.index = 0; /* only a single TX SC is used, so index is always 0 */
779         msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
780
781         /* MAC addr for TX */
782         msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
783         msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
784         msg_macsec.txsc.sa_mask = 0x3f;
785
786         msg_macsec.txsc.da_mask = 0;
787         msg_macsec.txsc.tci = 0x0B;
788         msg_macsec.txsc.curr_an = 0; /* SA index currently in use */
789
790         /*
791          * Create the SCI (Secure Channel Identifier).
792          * The SCI is constructed from the source MAC address and the port identifier.
793          */
794         uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
795                                (msg_macsec.txsc.mac_sa[0] >> 16);
796         uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
797
798         uint32_t port_identifier = 1;
799
800         msg_macsec.txsc.sci[1] = sci_hi_part;
801         msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
802
803         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
804
805         if (response.result)
806                 return -1;
807
808         memset(&msg_macsec, 0, sizeof(msg_macsec));
809
810         /* Configure RX SC */
811
812         msg_macsec.msg_type = macsec_add_rx_sc_msg;
813         msg_macsec.rxsc.index = aqcfg->rxsc.pi;
814         msg_macsec.rxsc.replay_protect =
815                 aqcfg->common.replay_protection_enabled;
816         msg_macsec.rxsc.anti_replay_window = 0;
817
818         /* MAC addr for RX */
819         msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
820         msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
821         msg_macsec.rxsc.da_mask = 0; /* 0x3f would match all six DA bytes */
822
823         msg_macsec.rxsc.sa_mask = 0;
824
825         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
826
827         if (response.result)
828                 return -1;
829
830         memset(&msg_macsec, 0, sizeof(msg_macsec));
831
832         /* Configure TX SA */
833
834         msg_macsec.msg_type = macsec_add_tx_sa_msg;
835         msg_macsec.txsa.index = aqcfg->txsa.idx;
836         msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
837
838         msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
839         msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
840         msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
841         msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
842
843         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
844
845         if (response.result)
846                 return -1;
847
848         memset(&msg_macsec, 0, sizeof(msg_macsec));
849
850         /* Configure RX SA */
851
852         msg_macsec.msg_type = macsec_add_rx_sa_msg;
853         msg_macsec.rxsa.index = aqcfg->rxsa.idx;
854         msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
855
856         msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
857         msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
858         msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
859         msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
860
861         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
862
863         if (response.result)
864                 return -1;
865
866         return 0;
867 }
868
869 int atl_macsec_enable(struct rte_eth_dev *dev,
870                       uint8_t encr, uint8_t repl_prot)
871 {
872         struct aq_hw_cfg_s *cfg =
873                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
874
875         cfg->aq_macsec.common.macsec_enabled = 1;
876         cfg->aq_macsec.common.encryption_enabled = encr;
877         cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
878
879         return 0;
880 }
881
882 int atl_macsec_disable(struct rte_eth_dev *dev)
883 {
884         struct aq_hw_cfg_s *cfg =
885                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
886
887         cfg->aq_macsec.common.macsec_enabled = 0;
888
889         return 0;
890 }
891
892 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
893 {
894         struct aq_hw_cfg_s *cfg =
895                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
896
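        /* The MAC is stored right-aligned in the 8-byte mac[] field, hence the +2 offset */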
897         memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
898         memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
899                 RTE_ETHER_ADDR_LEN);
900
901         return 0;
902 }
903
904 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
905                            uint8_t *mac, uint16_t pi)
906 {
907         struct aq_hw_cfg_s *cfg =
908                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
909
910         memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
911         memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
912                 RTE_ETHER_ADDR_LEN);
913         cfg->aq_macsec.rxsc.pi = pi;
914
915         return 0;
916 }
917
918 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
919                            uint8_t idx, uint8_t an,
920                            uint32_t pn, uint8_t *key)
921 {
922         struct aq_hw_cfg_s *cfg =
923                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
924
925         cfg->aq_macsec.txsa.idx = idx;
926         cfg->aq_macsec.txsa.pn = pn;
927         cfg->aq_macsec.txsa.an = an;
928
929         memcpy(&cfg->aq_macsec.txsa.key, key, 16);
930         return 0;
931 }
932
933 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
934                            uint8_t idx, uint8_t an,
935                            uint32_t pn, uint8_t *key)
936 {
937         struct aq_hw_cfg_s *cfg =
938                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
939
940         cfg->aq_macsec.rxsa.idx = idx;
941         cfg->aq_macsec.rxsa.pn = pn;
942         cfg->aq_macsec.rxsa.an = an;
943
944         memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
945         return 0;
946 }
947
948 static int
949 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
950 {
951         struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
952         struct aq_hw_s *hw = &adapter->hw;
953         struct atl_sw_stats *swstats = &adapter->sw_stats;
954         unsigned int i;
955
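        /*
         * Port-level counters come from the DMA statistics refreshed by the
         * firmware; per-queue counters are accumulated in software by the
         * RX/TX paths.
         */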
956         hw->aq_fw_ops->update_stats(hw);
957
958         /* Fill out the rte_eth_stats statistics structure */
959         stats->ipackets = hw->curr_stats.dma_pkt_rc;
960         stats->ibytes = hw->curr_stats.dma_oct_rc;
961         stats->imissed = hw->curr_stats.dpc;
962         stats->ierrors = hw->curr_stats.erpt;
963
964         stats->opackets = hw->curr_stats.dma_pkt_tc;
965         stats->obytes = hw->curr_stats.dma_oct_tc;
966         stats->oerrors = 0;
967
968         stats->rx_nombuf = swstats->rx_nombuf;
969
970         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
971                 stats->q_ipackets[i] = swstats->q_ipackets[i];
972                 stats->q_opackets[i] = swstats->q_opackets[i];
973                 stats->q_ibytes[i] = swstats->q_ibytes[i];
974                 stats->q_obytes[i] = swstats->q_obytes[i];
975                 stats->q_errors[i] = swstats->q_errors[i];
976         }
977         return 0;
978 }
979
980 static int
981 atl_dev_stats_reset(struct rte_eth_dev *dev)
982 {
983         struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
984         struct aq_hw_s *hw = &adapter->hw;
985
986         hw->aq_fw_ops->update_stats(hw);
987
988         /* Reset software totals */
989         memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
990
991         memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
992
993         return 0;
994 }
995
996 static int
997 atl_dev_xstats_get_count(struct rte_eth_dev *dev)
998 {
999         struct atl_adapter *adapter =
1000                 (struct atl_adapter *)dev->data->dev_private;
1001
1002         struct aq_hw_s *hw = &adapter->hw;
1003         unsigned int i, count = 0;
1004
1005         for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
1006                 if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
1007                         ((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
1008                         continue;
1009
1010                 count++;
1011         }
1012
1013         return count;
1014 }
1015
1016 static int
1017 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1018                          struct rte_eth_xstat_name *xstats_names,
1019                          unsigned int size)
1020 {
1021         unsigned int i;
1022         unsigned int count = atl_dev_xstats_get_count(dev);
1023
1024         if (xstats_names) {
1025                 for (i = 0; i < size && i < count; i++) {
1026                         snprintf(xstats_names[i].name,
1027                                 RTE_ETH_XSTATS_NAME_SIZE, "%s",
1028                                 atl_xstats_tbl[i].name);
1029                 }
1030         }
1031
1032         return count;
1033 }
1034
1035 static int
1036 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1037                    unsigned int n)
1038 {
1039         struct atl_adapter *adapter = dev->data->dev_private;
1040         struct aq_hw_s *hw = &adapter->hw;
1041         struct get_stats req = { 0 };
1042         struct macsec_msg_fw_request msg = { 0 };
1043         struct macsec_msg_fw_response resp = { 0 };
1044         int err = -1;
1045         unsigned int i;
1046         unsigned int count = atl_dev_xstats_get_count(dev);
1047
1048         if (!stats)
1049                 return count;
1050
1051         if (hw->aq_fw_ops->send_macsec_req != NULL) {
1052                 req.ingress_sa_index = 0xff;
1053                 req.egress_sc_index = 0xff;
1054                 req.egress_sa_index = 0xff;
1055
1056                 msg.msg_type = macsec_get_stats_msg;
1057                 msg.stats = req;
1058
1059                 err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1060         }
1061
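        /*
         * MSM counters are read straight from hw->curr_stats via the table
         * offsets; MACsec counters are taken from the firmware response only
         * if the request above succeeded, otherwise they remain zero.
         */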
1062         for (i = 0; i < n && i < count; i++) {
1063                 stats[i].id = i;
1064
1065                 switch (atl_xstats_tbl[i].type) {
1066                 case XSTATS_TYPE_MSM:
1067                         stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
1068                                          atl_xstats_tbl[i].offset);
1069                         break;
1070                 case XSTATS_TYPE_MACSEC:
1071                         if (!err) {
1072                                 stats[i].value =
1073                                         *(u64 *)((uint8_t *)&resp.stats +
1074                                         atl_xstats_tbl[i].offset);
1075                         }
1076                         break;
1077                 }
1078         }
1079
1080         return i;
1081 }
1082
1083 static int
1084 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1085 {
1086         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1087         uint32_t fw_ver = 0;
1088         unsigned int ret = 0;
1089
1090         ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1091         if (ret)
1092                 return -EIO;
1093
1094         ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1095                        (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1096
1097         ret += 1; /* account for the terminating null byte */
1098
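        /* If the caller's buffer is too small, return the required length instead */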
1099         if (fw_size < ret)
1100                 return ret;
1101
1102         return 0;
1103 }
1104
1105 static int
1106 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1107 {
1108         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1109
1110         dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1111         dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1112
1113         dev_info->min_rx_bufsize = 1024;
1114         dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1115         dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1116         dev_info->max_vfs = pci_dev->max_vfs;
1117
1118         dev_info->max_hash_mac_addrs = 0;
1119         dev_info->max_vmdq_pools = 0;
1120         dev_info->vmdq_queue_num = 0;
1121
1122         dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1123
1124         dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1125
1126
1127         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1128                 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1129         };
1130
1131         dev_info->default_txconf = (struct rte_eth_txconf) {
1132                 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1133         };
1134
1135         dev_info->rx_desc_lim = rx_desc_lim;
1136         dev_info->tx_desc_lim = tx_desc_lim;
1137
1138         dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1139         dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1140         dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1141
1142         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1143         dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1144         dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1145         dev_info->speed_capa |= ETH_LINK_SPEED_5G;
1146
1147         return 0;
1148 }
1149
1150 static const uint32_t *
1151 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1152 {
1153         static const uint32_t ptypes[] = {
1154                 RTE_PTYPE_L2_ETHER,
1155                 RTE_PTYPE_L2_ETHER_ARP,
1156                 RTE_PTYPE_L2_ETHER_VLAN,
1157                 RTE_PTYPE_L3_IPV4,
1158                 RTE_PTYPE_L3_IPV6,
1159                 RTE_PTYPE_L4_TCP,
1160                 RTE_PTYPE_L4_UDP,
1161                 RTE_PTYPE_L4_SCTP,
1162                 RTE_PTYPE_L4_ICMP,
1163                 RTE_PTYPE_UNKNOWN
1164         };
1165
1166         if (dev->rx_pkt_burst == atl_recv_pkts)
1167                 return ptypes;
1168
1169         return NULL;
1170 }
1171
1172 static void
1173 atl_dev_delayed_handler(void *param)
1174 {
1175         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1176
1177         atl_dev_configure_macsec(dev);
1178 }
1179
1180
1181 /* return 0 means link status changed, -1 means not changed */
1182 static int
1183 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
1184 {
1185         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1186         struct rte_eth_link link, old;
1187         u32 fc = AQ_NIC_FC_OFF;
1188         int err = 0;
1189
1190         link.link_status = ETH_LINK_DOWN;
1191         link.link_speed = 0;
1192         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1193         link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1194         memset(&old, 0, sizeof(old));
1195
1196         /* load old link status */
1197         rte_eth_linkstatus_get(dev, &old);
1198
1199         /* read current link status */
1200         err = hw->aq_fw_ops->update_link_status(hw);
1201
1202         if (err)
1203                 return 0;
1204
1205         if (hw->aq_link_status.mbps == 0) {
1206                 /* write default (down) link status */
1207                 rte_eth_linkstatus_set(dev, &link);
1208                 if (link.link_status == old.link_status)
1209                         return -1;
1210                 return 0;
1211         }
1212
1213         link.link_status = ETH_LINK_UP;
1214         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1215         link.link_speed = hw->aq_link_status.mbps;
1216
1217         rte_eth_linkstatus_set(dev, &link);
1218
1219         if (link.link_status == old.link_status)
1220                 return -1;
1221
1222         /* Driver has to update flow control settings on RX block
1223          * on any link event.
1224          * We should query FW whether it negotiated FC.
1225          */
1226         if (hw->aq_fw_ops->get_flow_control) {
1227                 hw->aq_fw_ops->get_flow_control(hw, &fc);
1228                 hw_atl_b0_set_fc(hw, fc, 0U);
1229         }
1230
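        /* Re-apply the MACsec configuration ~1 second after the link change */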
1231         if (rte_eal_alarm_set(1000 * 1000,
1232                               atl_dev_delayed_handler, (void *)dev) < 0)
1233                 PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");
1234
1235         return 0;
1236 }
1237
1238 static int
1239 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1240 {
1241         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1242
1243         hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1244
1245         return 0;
1246 }
1247
1248 static int
1249 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1250 {
1251         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1252
1253         hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1254
1255         return 0;
1256 }
1257
1258 static int
1259 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1260 {
1261         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1262
1263         hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1264
1265         return 0;
1266 }
1267
1268 static int
1269 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1270 {
1271         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1272
1273         if (dev->data->promiscuous == 1)
1274                 return 0; /* must remain in all_multicast mode */
1275
1276         hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1277
1278         return 0;
1279 }
1280
1281 /**
1282  * It clears the interrupt causes and enables the interrupt.
1283  * It is called only once during NIC initialization.
1284  *
1285  * @param dev
1286  *  Pointer to struct rte_eth_dev.
1287  * @param on
1288  *  Enable or Disable.
1289  *
1290  * @return
1291  *  - On success, zero.
1292  *  - On failure, a negative value.
1293  */
1294
1295 static int
1296 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
1297 {
1298         atl_dev_link_status_print(dev);
1299         return 0;
1300 }
1301
1302 static int
1303 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
1304 {
1305         return 0;
1306 }
1307
1308
1309 static int
1310 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1311 {
1312         struct atl_interrupt *intr =
1313                 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1314         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1315         u64 cause = 0;
1316
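        /*
         * Read the latched cause bits and mask further interrupts; they are
         * re-enabled at the end of atl_dev_interrupt_action().
         */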
1317         hw_atl_b0_hw_irq_read(hw, &cause);
1318
1319         atl_disable_intr(hw);
1320
1321         if (cause & BIT(ATL_IRQ_CAUSE_LINK))
1322                 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
1323
1324         return 0;
1325 }
1326
1327 /**
1328  * It gets and then prints the link status.
1329  *
1330  * @param dev
1331  *  Pointer to struct rte_eth_dev.
1332  *
1333  * @return
1334  *  void
1336  */
1337 static void
1338 atl_dev_link_status_print(struct rte_eth_dev *dev)
1339 {
1340         struct rte_eth_link link;
1341
1342         memset(&link, 0, sizeof(link));
1343         rte_eth_linkstatus_get(dev, &link);
1344         if (link.link_status) {
1345                 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1346                                         (int)(dev->data->port_id),
1347                                         (unsigned int)link.link_speed,
1348                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1349                                         "full-duplex" : "half-duplex");
1350         } else {
1351                 PMD_DRV_LOG(INFO, " Port %d: Link Down",
1352                                 (int)(dev->data->port_id));
1353         }
1354
1355
1356 #ifdef DEBUG
1357 {
1358         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1359
1360         PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1361                                 pci_dev->addr.domain,
1362                                 pci_dev->addr.bus,
1363                                 pci_dev->addr.devid,
1364                                 pci_dev->addr.function);
1365 }
1366 #endif
1367
1368         PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1369 }
1370
1371 /*
1372  * It executes link_update after an interrupt has occurred.
1373  *
1374  * @param dev
1375  *  Pointer to struct rte_eth_dev.
1376  *
1377  * @return
1378  *  - On success, zero.
1379  *  - On failure, a negative value.
1380  */
1381 static int
1382 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1383                            struct rte_intr_handle *intr_handle)
1384 {
1385         struct atl_interrupt *intr =
1386                 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1387         struct atl_adapter *adapter = dev->data->dev_private;
1388         struct aq_hw_s *hw = &adapter->hw;
1389
1390         if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
1391                 goto done;
1392
1393         intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1394
1395         /* Notify userapp if link status changed */
1396         if (!atl_dev_link_update(dev, 0)) {
1397                 atl_dev_link_status_print(dev);
1398                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1399         } else {
1400                 if (hw->aq_fw_ops->send_macsec_req == NULL)
1401                         goto done;
1402
1403                 /* Check whether MACsec keys have expired */
1404                 struct get_stats req = { 0 };
1405                 struct macsec_msg_fw_request msg = { 0 };
1406                 struct macsec_msg_fw_response resp = { 0 };
1407
1408                 req.ingress_sa_index = 0x0;
1409                 req.egress_sc_index = 0x0;
1410                 req.egress_sa_index = 0x0;
1411                 msg.msg_type = macsec_get_stats_msg;
1412                 msg.stats = req;
1413
1414                 int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1415                 if (err) {
1416                         PMD_DRV_LOG(ERR, "send_macsec_req fail");
1417                         goto done;
1418                 }
1419                 if (resp.stats.egress_threshold_expired ||
1420                     resp.stats.ingress_threshold_expired ||
1421                     resp.stats.egress_expired ||
1422                     resp.stats.ingress_expired) {
1423                         PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
1424                         rte_eth_dev_callback_process(dev,
1425                                 RTE_ETH_EVENT_MACSEC, NULL);
1426                 }
1427         }
1428 done:
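        /* Re-arm the interrupts masked in atl_dev_interrupt_get_status() */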
1429         atl_enable_intr(dev);
1430         rte_intr_ack(intr_handle);
1431
1432         return 0;
1433 }
1434
1435 /**
1436  * Interrupt handler triggered by the NIC for handling a
1437  * specific interrupt.
1438  *
1439  * @param handle
1440  *  Pointer to interrupt handle.
1441  * @param param
1442  *  The address of the parameter (struct rte_eth_dev *) registered before.
1443  *
1444  * @return
1445  *  void
1446  */
1447 static void
1448 atl_dev_interrupt_handler(void *param)
1449 {
1450         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1451
1452         atl_dev_interrupt_get_status(dev);
1453         atl_dev_interrupt_action(dev, dev->intr_handle);
1454 }
1455
1456
1457 static int
1458 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1459 {
1460         return SFP_EEPROM_SIZE;
1461 }
1462
1463 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1464                        struct rte_dev_eeprom_info *eeprom)
1465 {
1466         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1467         uint32_t dev_addr = SMBUS_DEVICE_ID;
1468
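        /* A non-zero eeprom->magic (7-bit value) selects an alternate SMBus device address */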
1469         if (hw->aq_fw_ops->get_eeprom == NULL)
1470                 return -ENOTSUP;
1471
1472         if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1473             eeprom->data == NULL)
1474                 return -EINVAL;
1475
1476         if (eeprom->magic > 0x7F)
1477                 return -EINVAL;
1478
1479         if (eeprom->magic)
1480                 dev_addr = eeprom->magic;
1481
1482         return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1483                                          eeprom->length, eeprom->offset);
1484 }
1485
1486 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1487                        struct rte_dev_eeprom_info *eeprom)
1488 {
1489         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1490         uint32_t dev_addr = SMBUS_DEVICE_ID;
1491
1492         if (hw->aq_fw_ops->set_eeprom == NULL)
1493                 return -ENOTSUP;
1494
1495         if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1496             eeprom->data == NULL)
1497                 return -EINVAL;
1498
1499         if (eeprom->magic > 0x7F)
1500                 return -EINVAL;
1501
1502         if (eeprom->magic)
1503                 dev_addr = eeprom->magic;
1504
1505         return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1506                                          eeprom->length, eeprom->offset);
1507 }
1508
1509 static int
1510 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1511 {
1512         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1513         u32 mif_id;
1514         int err;
1515
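        /* A NULL data pointer is a request for the register dump size only */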
1516         if (regs->data == NULL) {
1517                 regs->length = hw_atl_utils_hw_get_reg_length();
1518                 regs->width = sizeof(u32);
1519                 return 0;
1520         }
1521
1522         /* Only full register dump is supported */
1523         if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1524                 return -ENOTSUP;
1525
1526         err = hw_atl_utils_hw_get_regs(hw, regs->data);
1527
1528         /* Device version */
1529         mif_id = hw_atl_reg_glb_mif_id_get(hw);
1530         regs->version = mif_id & 0xFFU;
1531
1532         return err;
1533 }
1534
1535 static int
1536 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1537 {
1538         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1539         u32 fc = AQ_NIC_FC_OFF;
1540
1541         if (hw->aq_fw_ops->get_flow_control == NULL)
1542                 return -ENOTSUP;
1543
1544         hw->aq_fw_ops->get_flow_control(hw, &fc);
1545
1546         if (fc == AQ_NIC_FC_OFF)
1547                 fc_conf->mode = RTE_FC_NONE;
1548         else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
1549                 fc_conf->mode = RTE_FC_FULL;
1550         else if (fc & AQ_NIC_FC_RX)
1551                 fc_conf->mode = RTE_FC_RX_PAUSE;
1552         else if (fc & AQ_NIC_FC_TX)
1553                 fc_conf->mode = RTE_FC_TX_PAUSE;
1554
1555         return 0;
1556 }
1557
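     /*
      * Program link flow control from fc_conf->mode. The firmware call is
      * issued only when the requested mode differs from the cached
      * configuration.
      *
      * Illustrative application-side usage (port id 0 assumed for the sketch):
      *
      *     struct rte_eth_fc_conf fc = { .mode = RTE_FC_FULL };
      *     rte_eth_dev_flow_ctrl_set(0, &fc);
      */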
1558 static int
1559 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1560 {
1561         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1562         uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1563
1564
1565         if (hw->aq_fw_ops->set_flow_control == NULL)
1566                 return -ENOTSUP;
1567
1568         if (fc_conf->mode == RTE_FC_NONE)
1569                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1570         else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1571                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1572         else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1573                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1574         else if (fc_conf->mode == RTE_FC_FULL)
1575                 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1576
1577         if (old_flow_control != hw->aq_nic_cfg->flow_control)
1578                 return hw->aq_fw_ops->set_flow_control(hw);
1579
1580         return 0;
1581 }
1582
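     /*
      * Common helper for the MAC filter callbacks: pack the 6-byte address
      * into the MSW/LSW unicast filter registers for the given index and
      * re-enable the filter when requested. A NULL mac_addr clears the entry.
      */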
1583 static int
1584 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1585                     u8 *mac_addr, bool enable)
1586 {
1587         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1588         unsigned int h = 0U;
1589         unsigned int l = 0U;
1590         int err;
1591
1592         if (mac_addr) {
1593                 h = (mac_addr[0] << 8) | (mac_addr[1]);
1594                 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1595                         (mac_addr[4] << 8) | mac_addr[5];
1596         }
1597
1598         hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1599         hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1600         hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1601
1602         if (enable)
1603                 hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1604
1605         err = aq_hw_err_from_flags(hw);
1606
1607         return err;
1608 }
1609
1610 static int
1611 atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1612                         uint32_t index __rte_unused, uint32_t pool __rte_unused)
1613 {
1614         if (rte_is_zero_ether_addr(mac_addr)) {
1615                 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1616                 return -EINVAL;
1617         }
1618
1619         return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1620 }
1621
1622 static void
1623 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1624 {
1625         atl_update_mac_addr(dev, index, NULL, false);
1626 }
1627
1628 static int
1629 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
1630 {
1631         atl_remove_mac_addr(dev, 0);
1632         /* Re-program filter 0 and propagate any failure to the caller */
1633         return atl_add_mac_addr(dev, addr, 0, 0);
1634 }
1635
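     /*
      * MTU update: reject values whose resulting frame size (MTU plus L2
      * header and CRC) exceeds the advertised max_rx_pktlen, then record the
      * new maximum frame size in the device configuration.
      *
      * Illustrative usage (port id 0 assumed): rte_eth_dev_set_mtu(0, 1500);
      */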
1636 static int
1637 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1638 {
1639         struct rte_eth_dev_info dev_info;
1640         int ret;
1641         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1642
1643         ret = atl_dev_info_get(dev, &dev_info);
1644         if (ret != 0)
1645                 return ret;
1646
1647         if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1648                 return -EINVAL;
1649
1650         /* update max frame size */
1651         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1652
1653         return 0;
1654 }
1655
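     /*
      * Add or remove a VLAN id in the HW_ATL_B0_MAX_VLAN_IDS-entry filter
      * table. The table is scanned for an existing entry first; on add, the
      * first free slot is used (-ENOMEM when the table is full). VLAN
      * promiscuous mode is re-enabled whenever the table ends up empty.
      *
      * Illustrative usage (port id 0 assumed): rte_eth_dev_vlan_filter(0, 100, 1);
      */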
1656 static int
1657 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1658 {
1659         struct aq_hw_cfg_s *cfg =
1660                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1661         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1662         int err = 0;
1663         int i = 0;
1664
1665         PMD_INIT_FUNC_TRACE();
1666
1667         for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1668                 if (cfg->vlan_filter[i] == vlan_id) {
1669                         if (!on) {
1670                                 /* Disable VLAN filter. */
1671                                 hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1672
1673                                 /* Clear VLAN filter entry */
1674                                 cfg->vlan_filter[i] = 0;
1675                         }
1676                         break;
1677                 }
1678         }
1679
1680         /* VLAN_ID was not found, so there is nothing to delete. */
1681         if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1682                 goto exit;
1683
1684         /* VLAN_ID already exists, or was removed above. Nothing to do. */
1685         if (i != HW_ATL_B0_MAX_VLAN_IDS)
1686                 goto exit;
1687
1688         /* Try to find a free VLAN filter entry for the new VLAN_ID */
1689         for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1690                 if (cfg->vlan_filter[i] == 0)
1691                         break;
1692         }
1693
1694         if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1695                 /* No free VLAN filter entry left for the new VLAN_ID */
1696                 err = -ENOMEM;
1697                 goto exit;
1698         }
1699
1700         cfg->vlan_filter[i] = vlan_id;
1701         hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1702         hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1703         hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1704
1705 exit:
1706         /* Enable VLAN promiscuous mode if the VLAN filter table is empty */
1707         for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1708                 if (cfg->vlan_filter[i] != 0)
1709                         break;
1710         }
1711
1712         hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1713
1714         return err;
1715 }
1716
1717 static int
1718 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1719 {
1720         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1721         struct aq_hw_cfg_s *cfg =
1722                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1723         int i;
1724
1725         PMD_INIT_FUNC_TRACE();
1726
1727         for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1728                 if (cfg->vlan_filter[i])
1729                         hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1730         }
1731         return 0;
1732 }
1733
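     /*
      * Apply VLAN offload changes: toggle the programmed VLAN filters, apply
      * VLAN stripping to every Rx queue, and reject QinQ (extend) requests.
      */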
1734 static int
1735 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1736 {
1737         struct aq_hw_cfg_s *cfg =
1738                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1739         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1740         int ret = 0;
1741         int i;
1742
1743         PMD_INIT_FUNC_TRACE();
1744
1745         ret = atl_enable_vlan_filter(dev, !!(mask & ETH_VLAN_FILTER_MASK));
1746
1747         cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1748
1749         for (i = 0; i < dev->data->nb_rx_queues; i++)
1750                 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1751
1752         if (mask & ETH_VLAN_EXTEND_MASK)
1753                 ret = -ENOTSUP;
1754
1755         return ret;
1756 }
1757
1758 static int
1759 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1760                   uint16_t tpid)
1761 {
1762         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1763         int err = 0;
1764
1765         PMD_INIT_FUNC_TRACE();
1766
1767         switch (vlan_type) {
1768         case ETH_VLAN_TYPE_INNER:
1769                 hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1770                 break;
1771         case ETH_VLAN_TYPE_OUTER:
1772                 hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1773                 break;
1774         default:
1775                 PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1776                 err = -ENOTSUP;
1777         }
1778
1779         return err;
1780 }
1781
1782 static void
1783 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1784 {
1785         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1786
1787         PMD_INIT_FUNC_TRACE();
1788
1789         if (queue_id >= dev->data->nb_rx_queues) {
1790                 PMD_DRV_LOG(ERR, "Invalid queue id");
1791                 return;
1792         }
1793
1794         hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1795 }
1796
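     /*
      * Program the multicast address list into the unicast filter entries
      * starting at HW_ATL_B0_MAC_MIN; unused entries in that range are
      * disabled. At most AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN
      * addresses are accepted.
      */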
1797 static int
1798 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1799                           struct rte_ether_addr *mc_addr_set,
1800                           uint32_t nb_mc_addr)
1801 {
1802         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1803         u32 i;
1804
1805         if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1806                 return -EINVAL;
1807
1808         /* Update the whole unicast filter range reserved for multicast addresses */
1809         for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1810                 u8 *mac_addr = NULL;
1811                 u32 l = 0, h = 0;
1812
1813                 if (i < nb_mc_addr) {
1814                         mac_addr = mc_addr_set[i].addr_bytes;
1815                         l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1816                                 (mac_addr[4] << 8) | mac_addr[5];
1817                         h = (mac_addr[0] << 8) | mac_addr[1];
1818                 }
1819
1820                 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1821                 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1822                                                         HW_ATL_B0_MAC_MIN + i);
1823                 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1824                                                         HW_ATL_B0_MAC_MIN + i);
1825                 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1826                                            HW_ATL_B0_MAC_MIN + i);
1827         }
1828
1829         return 0;
1830 }
1831
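     /*
      * RSS redirection table update/query. Only reta_conf[0].reta[] is
      * consulted; entries are clamped to the configured Rx queue range and
      * bounded by the hardware indirection table size.
      */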
1832 static int
1833 atl_reta_update(struct rte_eth_dev *dev,
1834                    struct rte_eth_rss_reta_entry64 *reta_conf,
1835                    uint16_t reta_size)
1836 {
1837         int i;
1838         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1839         struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1840
1841         for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1842                 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1843                                         dev->data->nb_rx_queues - 1);
1844
1845         hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1846         return 0;
1847 }
1848
1849 static int
1850 atl_reta_query(struct rte_eth_dev *dev,
1851                     struct rte_eth_rss_reta_entry64 *reta_conf,
1852                     uint16_t reta_size)
1853 {
1854         int i;
1855         struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1856
1857         for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1858                 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1859         reta_conf->mask = ~0ULL;
1860         return 0;
1861 }
1862
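     /*
      * RSS hash configuration: a zero rss_hf disables RSS. A supplied key is
      * copied as-is (callers are expected to keep it within the 40-byte size
      * of the default key below); with no key, the built-in default key is
      * programmed.
      *
      * Illustrative application-side usage (port id 0 assumed):
      *
      *     struct rte_eth_rss_conf conf = { .rss_hf = ETH_RSS_IP | ETH_RSS_TCP };
      *     rte_eth_dev_rss_hash_update(0, &conf);
      */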
1863 static int
1864 atl_rss_hash_update(struct rte_eth_dev *dev,
1865                                  struct rte_eth_rss_conf *rss_conf)
1866 {
1867         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1868         struct aq_hw_cfg_s *cfg =
1869                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1870         static u8 def_rss_key[40] = {
1871                 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1872                 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1873                 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1874                 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1875                 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1876         };
1877
1878         cfg->is_rss = !!rss_conf->rss_hf;
1879         if (rss_conf->rss_key) {
1880                 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1881                        rss_conf->rss_key_len);
1882                 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1883         } else {
1884                 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1885                        sizeof(def_rss_key));
1886                 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1887         }
1888
1889         hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1890         hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1891         return 0;
1892 }
1893
1894 static int
1895 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1896                                  struct rte_eth_rss_conf *rss_conf)
1897 {
1898         struct aq_hw_cfg_s *cfg =
1899                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1900
1901         rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1902         if (rss_conf->rss_key) {
1903                 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1904                 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1905                        rss_conf->rss_key_len);
1906         }
1907
1908         return 0;
1909 }
1910
1911 static bool
1912 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1913 {
1914         if (strcmp(dev->device->driver->name, drv->driver.name))
1915                 return false;
1916
1917         return true;
1918 }
1919
1920 bool
1921 is_atlantic_supported(struct rte_eth_dev *dev)
1922 {
1923         return is_device_supported(dev, &rte_atl_pmd);
1924 }
1925
1926 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1927 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1928 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1929 RTE_LOG_REGISTER(atl_logtype_init, pmd.net.atlantic.init, NOTICE);
1930 RTE_LOG_REGISTER(atl_logtype_driver, pmd.net.atlantic.driver, NOTICE);