ethdev: allow drivers to return error on close
[dpdk.git] / drivers / net / atlantic / atl_ethdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_alarm.h>
8
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
12 #include "atl_logs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
16
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
19
20 static int  atl_dev_configure(struct rte_eth_dev *dev);
21 static int  atl_dev_start(struct rte_eth_dev *dev);
22 static void atl_dev_stop(struct rte_eth_dev *dev);
23 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
24 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
25 static int  atl_dev_close(struct rte_eth_dev *dev);
26 static int  atl_dev_reset(struct rte_eth_dev *dev);
27 static int  atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
28 static int  atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
29 static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
30 static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
31 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
32
33 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
34                                     struct rte_eth_xstat_name *xstats_names,
35                                     unsigned int size);
36
37 static int atl_dev_stats_get(struct rte_eth_dev *dev,
38                                 struct rte_eth_stats *stats);
39
40 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
41                               struct rte_eth_xstat *stats, unsigned int n);
42
43 static int atl_dev_stats_reset(struct rte_eth_dev *dev);
44
45 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46                               size_t fw_size);
47
48 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
49
50 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
51
52 /* VLAN stuff */
53 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
54                 uint16_t vlan_id, int on);
55
56 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
57
58 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
59                                      uint16_t queue_id, int on);
60
61 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
62                              enum rte_vlan_type vlan_type, uint16_t tpid);
63
64 /* EEPROM */
65 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
66 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
67                               struct rte_dev_eeprom_info *eeprom);
68 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
69                               struct rte_dev_eeprom_info *eeprom);
70
71 /* Regs */
72 static int atl_dev_get_regs(struct rte_eth_dev *dev,
73                             struct rte_dev_reg_info *regs);
74
75 /* Flow control */
76 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
77                                struct rte_eth_fc_conf *fc_conf);
78 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
79                                struct rte_eth_fc_conf *fc_conf);
80
81 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
82
83 /* Interrupts */
84 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
85 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
86 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
87 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
88                                     struct rte_intr_handle *handle);
89 static void atl_dev_interrupt_handler(void *param);
90
91
92 static int atl_add_mac_addr(struct rte_eth_dev *dev,
93                             struct rte_ether_addr *mac_addr,
94                             uint32_t index, uint32_t pool);
95 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
96 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
97                                            struct rte_ether_addr *mac_addr);
98
99 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
100                                     struct rte_ether_addr *mc_addr_set,
101                                     uint32_t nb_mc_addr);
102
103 /* RSS */
104 static int atl_reta_update(struct rte_eth_dev *dev,
105                              struct rte_eth_rss_reta_entry64 *reta_conf,
106                              uint16_t reta_size);
107 static int atl_reta_query(struct rte_eth_dev *dev,
108                             struct rte_eth_rss_reta_entry64 *reta_conf,
109                             uint16_t reta_size);
110 static int atl_rss_hash_update(struct rte_eth_dev *dev,
111                                  struct rte_eth_rss_conf *rss_conf);
112 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
113                                    struct rte_eth_rss_conf *rss_conf);
114
115
116 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
117         struct rte_pci_device *pci_dev);
118 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
119
120 static int atl_dev_info_get(struct rte_eth_dev *dev,
121                                 struct rte_eth_dev_info *dev_info);
122
/*
 * The set of PCI devices this driver supports.
 * All entries share the Aquantia vendor ID; the table is terminated by
 * a zeroed sentinel entry as required by the rte_pci_id convention.
 */
static const struct rte_pci_id pci_id_atl_map[] = {
	/* Engineering-sample / early device IDs */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },

	/* AQC1xx family */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },

	/* AQC1xxS variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },

	/* AQC1xxE variants */
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
	{ .vendor_id = 0, /* sentinel */ },
};
151
/*
 * PCI driver descriptor registered with the EAL.  The driver needs BAR
 * mapping (register access goes through pci_dev->mem_resource[0]) and
 * supports link-status-change interrupts.
 */
static struct rte_pci_driver rte_atl_pmd = {
	.id_table = pci_id_atl_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_atl_pci_probe,
	.remove = eth_atl_pci_remove,
};
158
/* Rx offload capabilities advertised by this PMD. */
#define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
			| DEV_RX_OFFLOAD_IPV4_CKSUM \
			| DEV_RX_OFFLOAD_UDP_CKSUM \
			| DEV_RX_OFFLOAD_TCP_CKSUM \
			| DEV_RX_OFFLOAD_JUMBO_FRAME \
			| DEV_RX_OFFLOAD_MACSEC_STRIP \
			| DEV_RX_OFFLOAD_VLAN_FILTER)

/* Tx offload capabilities advertised by this PMD. */
#define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
			| DEV_TX_OFFLOAD_IPV4_CKSUM \
			| DEV_TX_OFFLOAD_UDP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_CKSUM \
			| DEV_TX_OFFLOAD_TCP_TSO \
			| DEV_TX_OFFLOAD_MACSEC_INSERT \
			| DEV_TX_OFFLOAD_MULTI_SEGS)

/* Size in bytes of the SFP module EEPROM exposed via get/set_eeprom. */
#define SFP_EEPROM_SIZE 0x100
176
/* Descriptor-ring limits reported to applications via dev_infos_get. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = ATL_MAX_RING_DESC,
	.nb_min = ATL_MIN_RING_DESC,
	.nb_align = ATL_TXD_ALIGN,
	.nb_seg_max = ATL_TX_MAX_SEG,
	.nb_mtu_seg_max = ATL_TX_MAX_SEG,
};
190
/* Which hardware counter bank an extended statistic is read from. */
enum atl_xstats_type {
	XSTATS_TYPE_MSM = 0,	/* MAC counters, struct aq_stats_s */
	XSTATS_TYPE_MACSEC,	/* MACSec engine counters, struct macsec_stats */
};

/* Describe one MSM statistic: stringified name + offset in aq_stats_s. */
#define ATL_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct aq_stats_s, name), \
	XSTATS_TYPE_MSM \
}

/* Describe one MACSec statistic: name + offset in macsec_stats. */
#define ATL_MACSEC_XSTATS_FIELD(name) { \
	#name, \
	offsetof(struct macsec_stats, name), \
	XSTATS_TYPE_MACSEC \
}

/* One row of the extended-statistics lookup table. */
struct atl_xstats_tbl_s {
	const char *name;		/* stat name reported to the app */
	unsigned int offset;		/* byte offset in the source struct */
	enum atl_xstats_type type;	/* which struct the offset applies to */
};
213
/*
 * Extended-statistics table consumed by atl_dev_xstats_get() and
 * atl_dev_xstats_get_names().  MSM entries first, then MACSec counters
 * grouped by ingress/egress and SC/SA level.
 */
static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
	ATL_XSTATS_FIELD(uprc),
	ATL_XSTATS_FIELD(mprc),
	ATL_XSTATS_FIELD(bprc),
	ATL_XSTATS_FIELD(erpt),
	ATL_XSTATS_FIELD(uptc),
	ATL_XSTATS_FIELD(mptc),
	ATL_XSTATS_FIELD(bptc),
	ATL_XSTATS_FIELD(erpr),
	ATL_XSTATS_FIELD(ubrc),
	ATL_XSTATS_FIELD(ubtc),
	ATL_XSTATS_FIELD(mbrc),
	ATL_XSTATS_FIELD(mbtc),
	ATL_XSTATS_FIELD(bbrc),
	ATL_XSTATS_FIELD(bbtc),
	/* Ingress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
	/* Ingress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
	ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
	ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
	ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
	ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
	/* Egress Common Counters */
	ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_too_long),
	/* Egress SC Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
	/* Egress SA Counters */
	ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
	ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
};
262
/*
 * ethdev callback table registered in eth_atl_dev_init().  Entries not
 * defined in this file (queue control, rxq/txq info, intr enable) come
 * from the PMD's rx/tx source and are declared in atl_ethdev.h.
 */
static const struct eth_dev_ops atl_eth_dev_ops = {
	.dev_configure        = atl_dev_configure,
	.dev_start            = atl_dev_start,
	.dev_stop             = atl_dev_stop,
	.dev_set_link_up      = atl_dev_set_link_up,
	.dev_set_link_down    = atl_dev_set_link_down,
	.dev_close            = atl_dev_close,
	.dev_reset            = atl_dev_reset,

	/* PROMISC */
	.promiscuous_enable   = atl_dev_promiscuous_enable,
	.promiscuous_disable  = atl_dev_promiscuous_disable,
	.allmulticast_enable  = atl_dev_allmulticast_enable,
	.allmulticast_disable = atl_dev_allmulticast_disable,

	/* Link */
	.link_update          = atl_dev_link_update,

	.get_reg              = atl_dev_get_regs,

	/* Stats; xstats_reset shares the same handler as stats_reset */
	.stats_get            = atl_dev_stats_get,
	.xstats_get           = atl_dev_xstats_get,
	.xstats_get_names     = atl_dev_xstats_get_names,
	.stats_reset          = atl_dev_stats_reset,
	.xstats_reset         = atl_dev_stats_reset,

	.fw_version_get       = atl_fw_version_get,
	.dev_infos_get        = atl_dev_info_get,
	.dev_supported_ptypes_get = atl_dev_supported_ptypes_get,

	.mtu_set              = atl_dev_mtu_set,

	/* VLAN */
	.vlan_filter_set      = atl_vlan_filter_set,
	.vlan_offload_set     = atl_vlan_offload_set,
	.vlan_tpid_set        = atl_vlan_tpid_set,
	.vlan_strip_queue_set = atl_vlan_strip_queue_set,

	/* Queue Control */
	.rx_queue_start       = atl_rx_queue_start,
	.rx_queue_stop        = atl_rx_queue_stop,
	.rx_queue_setup       = atl_rx_queue_setup,
	.rx_queue_release     = atl_rx_queue_release,

	.tx_queue_start       = atl_tx_queue_start,
	.tx_queue_stop        = atl_tx_queue_stop,
	.tx_queue_setup       = atl_tx_queue_setup,
	.tx_queue_release     = atl_tx_queue_release,

	.rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,

	/* EEPROM */
	.get_eeprom_length    = atl_dev_get_eeprom_length,
	.get_eeprom           = atl_dev_get_eeprom,
	.set_eeprom           = atl_dev_set_eeprom,

	/* Flow Control */
	.flow_ctrl_get        = atl_flow_ctrl_get,
	.flow_ctrl_set        = atl_flow_ctrl_set,

	/* MAC */
	.mac_addr_add         = atl_add_mac_addr,
	.mac_addr_remove      = atl_remove_mac_addr,
	.mac_addr_set         = atl_set_default_mac_addr,
	.set_mc_addr_list     = atl_dev_set_mc_addr_list,
	.rxq_info_get         = atl_rxq_info_get,
	.txq_info_get         = atl_txq_info_get,

	/* RSS */
	.reta_update          = atl_reta_update,
	.reta_query           = atl_reta_query,
	.rss_hash_update      = atl_rss_hash_update,
	.rss_hash_conf_get    = atl_rss_hash_conf_get,
};
338
/* Thin wrapper over the B0 hardware reset; returns the HW-layer status. */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
344
/* Unmask all interrupt sources (lower 32 bits of the ITR mask). */
static inline void
atl_enable_intr(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
}
352
/* Mask all interrupt sources (lower 32 bits of the ITR mask). */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
359
360 static int
361 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
362 {
363         struct atl_adapter *adapter = eth_dev->data->dev_private;
364         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
365         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
366         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
367         int err = 0;
368
369         PMD_INIT_FUNC_TRACE();
370
371         eth_dev->dev_ops = &atl_eth_dev_ops;
372
373         eth_dev->rx_queue_count       = atl_rx_queue_count;
374         eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
375         eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;
376
377         eth_dev->rx_pkt_burst = &atl_recv_pkts;
378         eth_dev->tx_pkt_burst = &atl_xmit_pkts;
379         eth_dev->tx_pkt_prepare = &atl_prep_pkts;
380
381         /* For secondary processes, the primary process has done all the work */
382         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
383                 return 0;
384
385         /* Vendor and Device ID need to be set before init of shared code */
386         hw->device_id = pci_dev->id.device_id;
387         hw->vendor_id = pci_dev->id.vendor_id;
388         hw->mmio = (void *)pci_dev->mem_resource[0].addr;
389
390         /* Hardware configuration - hardcode */
391         adapter->hw_cfg.is_lro = false;
392         adapter->hw_cfg.wol = false;
393         adapter->hw_cfg.is_rss = false;
394         adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
395
396         adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
397                           AQ_NIC_RATE_5G |
398                           AQ_NIC_RATE_2G5 |
399                           AQ_NIC_RATE_1G |
400                           AQ_NIC_RATE_100M;
401
402         adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
403         adapter->hw_cfg.aq_rss.indirection_table_size =
404                 HW_ATL_B0_RSS_REDIRECTION_MAX;
405
406         hw->aq_nic_cfg = &adapter->hw_cfg;
407
408         pthread_mutex_init(&hw->mbox_mutex, NULL);
409
410         /* disable interrupt */
411         atl_disable_intr(hw);
412
413         /* Allocate memory for storing MAC addresses */
414         eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
415                                         RTE_ETHER_ADDR_LEN, 0);
416         if (eth_dev->data->mac_addrs == NULL) {
417                 PMD_INIT_LOG(ERR, "MAC Malloc failed");
418                 return -ENOMEM;
419         }
420
421         err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
422         if (err)
423                 return err;
424
425         /* Copy the permanent MAC address */
426         if (hw->aq_fw_ops->get_mac_permanent(hw,
427                         eth_dev->data->mac_addrs->addr_bytes) != 0)
428                 return -EINVAL;
429
430         /* Reset the hw statistics */
431         atl_dev_stats_reset(eth_dev);
432
433         rte_intr_callback_register(intr_handle,
434                                    atl_dev_interrupt_handler, eth_dev);
435
436         /* enable uio/vfio intr/eventfd mapping */
437         rte_intr_enable(intr_handle);
438
439         /* enable support intr */
440         atl_enable_intr(eth_dev);
441
442         return err;
443 }
444
/*
 * Tear down a port initialized by eth_atl_dev_init().
 *
 * Primary process only: closes the device if it is still running,
 * unhooks the interrupt callback, frees the MAC address array and
 * destroys the firmware mailbox mutex.
 * Returns 0 on success, -EPERM when invoked from a secondary process.
 */
static int
eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct aq_hw_s *hw;

	PMD_INIT_FUNC_TRACE();

	/* Only the primary process owns the hardware state. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	/* Stop/reset the NIC first unless it was already closed. */
	if (hw->adapter_stopped == 0)
		atl_dev_close(eth_dev);

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	/* disable uio intr before callback unregister */
	rte_intr_disable(intr_handle);
	rte_intr_callback_unregister(intr_handle,
				     atl_dev_interrupt_handler, eth_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	pthread_mutex_destroy(&hw->mbox_mutex);

	return 0;
}
478
/*
 * PCI probe entry point: allocate an atl_adapter-sized private area and
 * run eth_atl_dev_init() on the new ethdev.
 */
static int
eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct atl_adapter), eth_atl_dev_init);
}
486
/* PCI remove entry point: uninit the ethdev bound to this PCI device. */
static int
eth_atl_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
}
492
/*
 * dev_configure callback: no hardware programming happens here; just
 * record that the link status must be refreshed once the port starts.
 * Always returns 0.
 */
static int
atl_dev_configure(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* set flag to update link status after init */
	intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}
506
507 /*
508  * Configure device link speed and setup link.
509  * It returns 0 on success.
510  */
511 static int
512 atl_dev_start(struct rte_eth_dev *dev)
513 {
514         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
515         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
516         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
517         uint32_t intr_vector = 0;
518         int status;
519         int err;
520
521         PMD_INIT_FUNC_TRACE();
522
523         /* set adapter started */
524         hw->adapter_stopped = 0;
525
526         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
527                 PMD_INIT_LOG(ERR,
528                 "Invalid link_speeds for port %u, fix speed not supported",
529                                 dev->data->port_id);
530                 return -EINVAL;
531         }
532
533         /* disable uio/vfio intr/eventfd mapping */
534         rte_intr_disable(intr_handle);
535
536         /* reinitialize adapter
537          * this calls reset and start
538          */
539         status = atl_reset_hw(hw);
540         if (status != 0)
541                 return -EIO;
542
543         err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
544
545         hw_atl_b0_hw_start(hw);
546         /* check and configure queue intr-vector mapping */
547         if ((rte_intr_cap_multiple(intr_handle) ||
548             !RTE_ETH_DEV_SRIOV(dev).active) &&
549             dev->data->dev_conf.intr_conf.rxq != 0) {
550                 intr_vector = dev->data->nb_rx_queues;
551                 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
552                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
553                                         ATL_MAX_INTR_QUEUE_NUM);
554                         return -ENOTSUP;
555                 }
556                 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
557                         PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
558                         return -1;
559                 }
560         }
561
562         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
563                 intr_handle->intr_vec = rte_zmalloc("intr_vec",
564                                     dev->data->nb_rx_queues * sizeof(int), 0);
565                 if (intr_handle->intr_vec == NULL) {
566                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
567                                      " intr_vec", dev->data->nb_rx_queues);
568                         return -ENOMEM;
569                 }
570         }
571
572         /* initialize transmission unit */
573         atl_tx_init(dev);
574
575         /* This can fail when allocating mbufs for descriptor rings */
576         err = atl_rx_init(dev);
577         if (err) {
578                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
579                 goto error;
580         }
581
582         PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
583                 hw->fw_ver_actual >> 24,
584                 (hw->fw_ver_actual >> 16) & 0xFF,
585                 hw->fw_ver_actual & 0xFFFF);
586         PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
587
588         err = atl_start_queues(dev);
589         if (err < 0) {
590                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
591                 goto error;
592         }
593
594         err = atl_dev_set_link_up(dev);
595
596         err = hw->aq_fw_ops->update_link_status(hw);
597
598         if (err)
599                 goto error;
600
601         dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
602
603         if (rte_intr_allow_others(intr_handle)) {
604                 /* check if lsc interrupt is enabled */
605                 if (dev->data->dev_conf.intr_conf.lsc != 0)
606                         atl_dev_lsc_interrupt_setup(dev, true);
607                 else
608                         atl_dev_lsc_interrupt_setup(dev, false);
609         } else {
610                 rte_intr_callback_unregister(intr_handle,
611                                              atl_dev_interrupt_handler, dev);
612                 if (dev->data->dev_conf.intr_conf.lsc != 0)
613                         PMD_INIT_LOG(INFO, "lsc won't enable because of"
614                                      " no intr multiplex");
615         }
616
617         /* check if rxq interrupt is enabled */
618         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
619             rte_intr_dp_is_en(intr_handle))
620                 atl_dev_rxq_interrupt_setup(dev);
621
622         /* enable uio/vfio intr/eventfd mapping */
623         rte_intr_enable(intr_handle);
624
625         /* resume enabled intr since hw reset */
626         atl_enable_intr(dev);
627
628         return 0;
629
630 error:
631         atl_stop_queues(dev);
632         return -EIO;
633 }
634
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 *
 * Masks interrupts, resets the NIC, stops the queues, clears recorded
 * link status and releases the rxq interrupt-vector mapping.  The
 * ordering matters: interrupts are masked before the reset so no ISR
 * runs against a half-torn-down device.
 */
static void
atl_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct aq_hw_s *hw =
		ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	PMD_INIT_FUNC_TRACE();

	/* disable interrupts */
	atl_disable_intr(hw);

	/* reset the NIC */
	atl_reset_hw(hw);
	hw->adapter_stopped = 1;

	atl_stop_queues(dev);

	/* Clear stored conf */
	dev->data->scattered_rx = 0;
	dev->data->lro = 0;

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	/* atl_dev_start() unregisters the callback when per-queue
	 * interrupts are unavailable; restore the default handler here.
	 */
	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
					   atl_dev_interrupt_handler,
					   (void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
679
680 /*
681  * Set device link up: enable tx.
682  */
683 static int
684 atl_dev_set_link_up(struct rte_eth_dev *dev)
685 {
686         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
687         uint32_t link_speeds = dev->data->dev_conf.link_speeds;
688         uint32_t speed_mask = 0;
689
690         if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
691                 speed_mask = hw->aq_nic_cfg->link_speed_msk;
692         } else {
693                 if (link_speeds & ETH_LINK_SPEED_10G)
694                         speed_mask |= AQ_NIC_RATE_10G;
695                 if (link_speeds & ETH_LINK_SPEED_5G)
696                         speed_mask |= AQ_NIC_RATE_5G;
697                 if (link_speeds & ETH_LINK_SPEED_1G)
698                         speed_mask |= AQ_NIC_RATE_1G;
699                 if (link_speeds & ETH_LINK_SPEED_2_5G)
700                         speed_mask |=  AQ_NIC_RATE_2G5;
701                 if (link_speeds & ETH_LINK_SPEED_100M)
702                         speed_mask |= AQ_NIC_RATE_100M;
703         }
704
705         return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
706 }
707
/*
 * Set device link down: request an empty speed mask, which the
 * firmware interprets as "drop the link".
 */
static int
atl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	return hw->aq_fw_ops->set_link_speed(hw, 0);
}
718
/*
 * Reset and stop device, then release the queue resources.
 * Returns 0 (the int return matches the ethdev close contract that
 * allows drivers to report close errors).
 */
static int
atl_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	atl_dev_stop(dev);

	atl_free_queues(dev);

	return 0;
}
733
/*
 * dev_reset callback: fully uninitialize the port and initialize it
 * again.  Returns the first failing step's status, or the result of
 * re-initialization.
 */
static int
atl_dev_reset(struct rte_eth_dev *dev)
{
	int err;

	err = eth_atl_dev_uninit(dev);
	if (err != 0)
		return err;

	return eth_atl_dev_init(dev);
}
747
748 static int
749 atl_dev_configure_macsec(struct rte_eth_dev *dev)
750 {
751         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
752         struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
753         struct aq_macsec_config *aqcfg = &cf->aq_macsec;
754         struct macsec_msg_fw_request msg_macsec;
755         struct macsec_msg_fw_response response;
756
757         if (!aqcfg->common.macsec_enabled ||
758             hw->aq_fw_ops->send_macsec_req == NULL)
759                 return 0;
760
761         memset(&msg_macsec, 0, sizeof(msg_macsec));
762
763         /* Creating set of sc/sa structures from parameters provided by DPDK */
764
765         /* Configure macsec */
766         msg_macsec.msg_type = macsec_cfg_msg;
767         msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
768         msg_macsec.cfg.interrupts_enabled = 1;
769
770         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
771
772         if (response.result)
773                 return -1;
774
775         memset(&msg_macsec, 0, sizeof(msg_macsec));
776
777         /* Configure TX SC */
778
779         msg_macsec.msg_type = macsec_add_tx_sc_msg;
780         msg_macsec.txsc.index = 0; /* TXSC always one (??) */
781         msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
782
783         /* MAC addr for TX */
784         msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
785         msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
786         msg_macsec.txsc.sa_mask = 0x3f;
787
788         msg_macsec.txsc.da_mask = 0;
789         msg_macsec.txsc.tci = 0x0B;
790         msg_macsec.txsc.curr_an = 0; /* SA index which currently used */
791
792         /*
793          * Creating SCI (Secure Channel Identifier).
794          * SCI constructed from Source MAC and Port identifier
795          */
796         uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
797                                (msg_macsec.txsc.mac_sa[0] >> 16);
798         uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
799
800         uint32_t port_identifier = 1;
801
802         msg_macsec.txsc.sci[1] = sci_hi_part;
803         msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
804
805         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
806
807         if (response.result)
808                 return -1;
809
810         memset(&msg_macsec, 0, sizeof(msg_macsec));
811
812         /* Configure RX SC */
813
814         msg_macsec.msg_type = macsec_add_rx_sc_msg;
815         msg_macsec.rxsc.index = aqcfg->rxsc.pi;
816         msg_macsec.rxsc.replay_protect =
817                 aqcfg->common.replay_protection_enabled;
818         msg_macsec.rxsc.anti_replay_window = 0;
819
820         /* MAC addr for RX */
821         msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
822         msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
823         msg_macsec.rxsc.da_mask = 0;//0x3f;
824
825         msg_macsec.rxsc.sa_mask = 0;
826
827         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
828
829         if (response.result)
830                 return -1;
831
832         memset(&msg_macsec, 0, sizeof(msg_macsec));
833
834         /* Configure RX SC */
835
836         msg_macsec.msg_type = macsec_add_tx_sa_msg;
837         msg_macsec.txsa.index = aqcfg->txsa.idx;
838         msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
839
840         msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
841         msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
842         msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
843         msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
844
845         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
846
847         if (response.result)
848                 return -1;
849
850         memset(&msg_macsec, 0, sizeof(msg_macsec));
851
852         /* Configure RX SA */
853
854         msg_macsec.msg_type = macsec_add_rx_sa_msg;
855         msg_macsec.rxsa.index = aqcfg->rxsa.idx;
856         msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
857
858         msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
859         msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
860         msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
861         msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
862
863         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
864
865         if (response.result)
866                 return -1;
867
868         return 0;
869 }
870
871 int atl_macsec_enable(struct rte_eth_dev *dev,
872                       uint8_t encr, uint8_t repl_prot)
873 {
874         struct aq_hw_cfg_s *cfg =
875                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
876
877         cfg->aq_macsec.common.macsec_enabled = 1;
878         cfg->aq_macsec.common.encryption_enabled = encr;
879         cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
880
881         return 0;
882 }
883
884 int atl_macsec_disable(struct rte_eth_dev *dev)
885 {
886         struct aq_hw_cfg_s *cfg =
887                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
888
889         cfg->aq_macsec.common.macsec_enabled = 0;
890
891         return 0;
892 }
893
894 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
895 {
896         struct aq_hw_cfg_s *cfg =
897                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
898
899         memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
900         memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
901                 RTE_ETHER_ADDR_LEN);
902
903         return 0;
904 }
905
906 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
907                            uint8_t *mac, uint16_t pi)
908 {
909         struct aq_hw_cfg_s *cfg =
910                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
911
912         memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
913         memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
914                 RTE_ETHER_ADDR_LEN);
915         cfg->aq_macsec.rxsc.pi = pi;
916
917         return 0;
918 }
919
920 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
921                            uint8_t idx, uint8_t an,
922                            uint32_t pn, uint8_t *key)
923 {
924         struct aq_hw_cfg_s *cfg =
925                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
926
927         cfg->aq_macsec.txsa.idx = idx;
928         cfg->aq_macsec.txsa.pn = pn;
929         cfg->aq_macsec.txsa.an = an;
930
931         memcpy(&cfg->aq_macsec.txsa.key, key, 16);
932         return 0;
933 }
934
935 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
936                            uint8_t idx, uint8_t an,
937                            uint32_t pn, uint8_t *key)
938 {
939         struct aq_hw_cfg_s *cfg =
940                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
941
942         cfg->aq_macsec.rxsa.idx = idx;
943         cfg->aq_macsec.rxsa.pn = pn;
944         cfg->aq_macsec.rxsa.an = an;
945
946         memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
947         return 0;
948 }
949
950 static int
951 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
952 {
953         struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
954         struct aq_hw_s *hw = &adapter->hw;
955         struct atl_sw_stats *swstats = &adapter->sw_stats;
956         unsigned int i;
957
958         hw->aq_fw_ops->update_stats(hw);
959
960         /* Fill out the rte_eth_stats statistics structure */
961         stats->ipackets = hw->curr_stats.dma_pkt_rc;
962         stats->ibytes = hw->curr_stats.dma_oct_rc;
963         stats->imissed = hw->curr_stats.dpc;
964         stats->ierrors = hw->curr_stats.erpt;
965
966         stats->opackets = hw->curr_stats.dma_pkt_tc;
967         stats->obytes = hw->curr_stats.dma_oct_tc;
968         stats->oerrors = 0;
969
970         stats->rx_nombuf = swstats->rx_nombuf;
971
972         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
973                 stats->q_ipackets[i] = swstats->q_ipackets[i];
974                 stats->q_opackets[i] = swstats->q_opackets[i];
975                 stats->q_ibytes[i] = swstats->q_ibytes[i];
976                 stats->q_obytes[i] = swstats->q_obytes[i];
977                 stats->q_errors[i] = swstats->q_errors[i];
978         }
979         return 0;
980 }
981
982 static int
983 atl_dev_stats_reset(struct rte_eth_dev *dev)
984 {
985         struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
986         struct aq_hw_s *hw = &adapter->hw;
987
988         hw->aq_fw_ops->update_stats(hw);
989
990         /* Reset software totals */
991         memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
992
993         memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
994
995         return 0;
996 }
997
998 static int
999 atl_dev_xstats_get_count(struct rte_eth_dev *dev)
1000 {
1001         struct atl_adapter *adapter =
1002                 (struct atl_adapter *)dev->data->dev_private;
1003
1004         struct aq_hw_s *hw = &adapter->hw;
1005         unsigned int i, count = 0;
1006
1007         for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
1008                 if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
1009                         ((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
1010                         continue;
1011
1012                 count++;
1013         }
1014
1015         return count;
1016 }
1017
1018 static int
1019 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1020                          struct rte_eth_xstat_name *xstats_names,
1021                          unsigned int size)
1022 {
1023         unsigned int i;
1024         unsigned int count = atl_dev_xstats_get_count(dev);
1025
1026         if (xstats_names) {
1027                 for (i = 0; i < size && i < count; i++) {
1028                         snprintf(xstats_names[i].name,
1029                                 RTE_ETH_XSTATS_NAME_SIZE, "%s",
1030                                 atl_xstats_tbl[i].name);
1031                 }
1032         }
1033
1034         return count;
1035 }
1036
/* Fill extended statistics; returns the number of entries written.
 * A NULL 'stats' pointer is a size query and returns the total count.
 */
static int
atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
                   unsigned int n)
{
        struct atl_adapter *adapter = dev->data->dev_private;
        struct aq_hw_s *hw = &adapter->hw;
        struct get_stats req = { 0 };
        struct macsec_msg_fw_request msg = { 0 };
        struct macsec_msg_fw_response resp = { 0 };
        int err = -1;	/* stays non-zero if no MACsec request is made */
        unsigned int i;
        unsigned int count = atl_dev_xstats_get_count(dev);

        if (!stats)
                return count;

        /* Fetch MACsec counters from firmware when the op is available.
         * NOTE(review): the 0xff indices presumably request aggregate
         * statistics - confirm against the firmware interface spec.
         */
        if (hw->aq_fw_ops->send_macsec_req != NULL) {
                req.ingress_sa_index = 0xff;
                req.egress_sc_index = 0xff;
                req.egress_sa_index = 0xff;

                msg.msg_type = macsec_get_stats_msg;
                msg.stats = req;

                err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
        }

        for (i = 0; i < n && i < count; i++) {
                stats[i].id = i;

                switch (atl_xstats_tbl[i].type) {
                case XSTATS_TYPE_MSM:
                        /* Value read at a byte offset into the cached
                         * hardware counter block.
                         */
                        stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
                                         atl_xstats_tbl[i].offset);
                        break;
                case XSTATS_TYPE_MACSEC:
                        /* Only filled when the FW request succeeded;
                         * otherwise the entry keeps whatever value the
                         * caller's buffer already held.
                         */
                        if (!err) {
                                stats[i].value =
                                        *(u64 *)((uint8_t *)&resp.stats +
                                        atl_xstats_tbl[i].offset);
                        }
                        break;
                }
        }

        return i;
}
1084
1085 static int
1086 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1087 {
1088         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1089         uint32_t fw_ver = 0;
1090         unsigned int ret = 0;
1091
1092         ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1093         if (ret)
1094                 return -EIO;
1095
1096         ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1097                        (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1098
1099         ret += 1; /* add string null-terminator */
1100
1101         if (fw_size < ret)
1102                 return ret;
1103
1104         return 0;
1105 }
1106
1107 static int
1108 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1109 {
1110         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1111
1112         dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1113         dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1114
1115         dev_info->min_rx_bufsize = 1024;
1116         dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1117         dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1118         dev_info->max_vfs = pci_dev->max_vfs;
1119
1120         dev_info->max_hash_mac_addrs = 0;
1121         dev_info->max_vmdq_pools = 0;
1122         dev_info->vmdq_queue_num = 0;
1123
1124         dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1125
1126         dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1127
1128
1129         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1130                 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1131         };
1132
1133         dev_info->default_txconf = (struct rte_eth_txconf) {
1134                 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1135         };
1136
1137         dev_info->rx_desc_lim = rx_desc_lim;
1138         dev_info->tx_desc_lim = tx_desc_lim;
1139
1140         dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1141         dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1142         dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1143
1144         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1145         dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1146         dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1147         dev_info->speed_capa |= ETH_LINK_SPEED_5G;
1148
1149         return 0;
1150 }
1151
1152 static const uint32_t *
1153 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1154 {
1155         static const uint32_t ptypes[] = {
1156                 RTE_PTYPE_L2_ETHER,
1157                 RTE_PTYPE_L2_ETHER_ARP,
1158                 RTE_PTYPE_L2_ETHER_VLAN,
1159                 RTE_PTYPE_L3_IPV4,
1160                 RTE_PTYPE_L3_IPV6,
1161                 RTE_PTYPE_L4_TCP,
1162                 RTE_PTYPE_L4_UDP,
1163                 RTE_PTYPE_L4_SCTP,
1164                 RTE_PTYPE_L4_ICMP,
1165                 RTE_PTYPE_UNKNOWN
1166         };
1167
1168         if (dev->rx_pkt_burst == atl_recv_pkts)
1169                 return ptypes;
1170
1171         return NULL;
1172 }
1173
static void
atl_dev_delayed_handler(void *param)
{
	/* Alarm callback: apply the cached MACsec configuration */
	atl_dev_configure_macsec((struct rte_eth_dev *)param);
}
1181
1182
/* Refresh the device link status.
 * Returns 0 when the link status changed, -1 when it did not.
 */
static int
atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_eth_link link, old;
	u32 fc = AQ_NIC_FC_OFF;
	int err = 0;

	/* Start from a link-down template */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = 0;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
	memset(&old, 0, sizeof(old));

	/* load old link status */
	rte_eth_linkstatus_get(dev, &old);

	/* read current link status */
	err = hw->aq_fw_ops->update_link_status(hw);

	/* A firmware error is reported as "status changed" (0) */
	if (err)
		return 0;

	if (hw->aq_link_status.mbps == 0) {
		/* write default (down) link status */
		rte_eth_linkstatus_set(dev, &link);
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	/* Link is up: publish the negotiated speed */
	link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = hw->aq_link_status.mbps;

	rte_eth_linkstatus_set(dev, &link);

	if (link.link_status == old.link_status)
		return -1;

	/* Driver has to update flow control settings on RX block
	 * on any link event.
	 * We should query FW whether it negotiated FC.
	 */
	if (hw->aq_fw_ops->get_flow_control) {
		hw->aq_fw_ops->get_flow_control(hw, &fc);
		hw_atl_b0_set_fc(hw, fc, 0U);
	}

	/* Defer MACsec reconfiguration by one second via an EAL alarm */
	if (rte_eal_alarm_set(1000 * 1000,
			      atl_dev_delayed_handler, (void *)dev) < 0)
		PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");

	return 0;
}
1239
1240 static int
1241 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1242 {
1243         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1244
1245         hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1246
1247         return 0;
1248 }
1249
1250 static int
1251 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1252 {
1253         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1254
1255         hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1256
1257         return 0;
1258 }
1259
1260 static int
1261 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1262 {
1263         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1264
1265         hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1266
1267         return 0;
1268 }
1269
1270 static int
1271 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1272 {
1273         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1274
1275         if (dev->data->promiscuous == 1)
1276                 return 0; /* must remain in all_multicast mode */
1277
1278         hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1279
1280         return 0;
1281 }
1282
1283 /**
1284  * It clears the interrupt causes and enables the interrupt.
 * It will be called only once during NIC initialization.
1286  *
1287  * @param dev
1288  *  Pointer to struct rte_eth_dev.
1289  * @param on
1290  *  Enable or Disable.
1291  *
1292  * @return
1293  *  - On success, zero.
1294  *  - On failure, a negative value.
1295  */
1296
static int
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
{
	/* The 'on' flag is ignored; this hook only logs the link state */
	atl_dev_link_status_print(dev);
	return 0;
}
1303
static int
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
{
	/* Intentional no-op: no extra Rx-queue interrupt setup needed */
	return 0;
}
1309
1310
/* Read and latch the pending interrupt causes; always returns 0. */
static int
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u64 cause = 0;

	/* Read the pending interrupt cause bits from the device */
	hw_atl_b0_hw_irq_read(hw, &cause);

	/* Mask interrupts until the causes have been serviced */
	atl_disable_intr(hw);

	/* Record a pending link event for atl_dev_interrupt_action() */
	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;

	return 0;
}
1328
1329 /**
1330  * It gets and then prints the link status.
1331  *
1332  * @param dev
1333  *  Pointer to struct rte_eth_dev.
1334  *
 * @return
 *  void (the current link status is only logged)
1338  */
1339 static void
1340 atl_dev_link_status_print(struct rte_eth_dev *dev)
1341 {
1342         struct rte_eth_link link;
1343
1344         memset(&link, 0, sizeof(link));
1345         rte_eth_linkstatus_get(dev, &link);
1346         if (link.link_status) {
1347                 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1348                                         (int)(dev->data->port_id),
1349                                         (unsigned int)link.link_speed,
1350                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1351                                         "full-duplex" : "half-duplex");
1352         } else {
1353                 PMD_DRV_LOG(INFO, " Port %d: Link Down",
1354                                 (int)(dev->data->port_id));
1355         }
1356
1357
1358 #ifdef DEBUG
1359 {
1360         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1361
1362         PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1363                                 pci_dev->addr.domain,
1364                                 pci_dev->addr.bus,
1365                                 pci_dev->addr.devid,
1366                                 pci_dev->addr.function);
1367 }
1368 #endif
1369
1370         PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1371 }
1372
1373 /*
1374  * It executes link_update after knowing an interrupt occurred.
1375  *
1376  * @param dev
1377  *  Pointer to struct rte_eth_dev.
1378  *
1379  * @return
1380  *  - On success, zero.
1381  *  - On failure, a negative value.
1382  */
static int
atl_dev_interrupt_action(struct rte_eth_dev *dev,
                           struct rte_intr_handle *intr_handle)
{
        struct atl_interrupt *intr =
                ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
        struct atl_adapter *adapter = dev->data->dev_private;
        struct aq_hw_s *hw = &adapter->hw;

        /* Only a latched link event needs servicing; otherwise just
         * re-enable interrupts and ack.
         */
        if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
                goto done;

        intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

        /* Notify userapp if link status changed */
        if (!atl_dev_link_update(dev, 0)) {
                atl_dev_link_status_print(dev);
                rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        } else {
                /* Link state unchanged: use the event to poll MACsec
                 * key expiry instead (requires FW support).
                 */
                if (hw->aq_fw_ops->send_macsec_req == NULL)
                        goto done;

                /* Check macsec Keys expired */
                struct get_stats req = { 0 };
                struct macsec_msg_fw_request msg = { 0 };
                struct macsec_msg_fw_response resp = { 0 };

                /* NOTE(review): index 0x0 presumably selects the first
                 * SA/SC entries - confirm against the FW interface spec.
                 */
                req.ingress_sa_index = 0x0;
                req.egress_sc_index = 0x0;
                req.egress_sa_index = 0x0;
                msg.msg_type = macsec_get_stats_msg;
                msg.stats = req;

                int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
                if (err) {
                        PMD_DRV_LOG(ERR, "send_macsec_req fail");
                        goto done;
                }
                /* Raise the MACsec event if any key or threshold expired */
                if (resp.stats.egress_threshold_expired ||
                    resp.stats.ingress_threshold_expired ||
                    resp.stats.egress_expired ||
                    resp.stats.ingress_expired) {
                        PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
                        rte_eth_dev_callback_process(dev,
                                RTE_ETH_EVENT_MACSEC, NULL);
                }
        }
done:
        /* Re-enable device interrupts and acknowledge the controller */
        atl_enable_intr(dev);
        rte_intr_ack(intr_handle);

        return 0;
}
1436
1437 /**
1438  * Interrupt handler triggered by NIC  for handling
1439  * specific interrupt.
1440  *
1441  * @param handle
1442  *  Pointer to interrupt handle.
1443  * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
1445  *
1446  * @return
1447  *  void
1448  */
1449 static void
1450 atl_dev_interrupt_handler(void *param)
1451 {
1452         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1453
1454         atl_dev_interrupt_get_status(dev);
1455         atl_dev_interrupt_action(dev, dev->intr_handle);
1456 }
1457
1458
static int
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
{
	/* The SFP module EEPROM has a fixed size on this device */
	return SFP_EEPROM_SIZE;
}
1464
1465 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1466                        struct rte_dev_eeprom_info *eeprom)
1467 {
1468         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1469         uint32_t dev_addr = SMBUS_DEVICE_ID;
1470
1471         if (hw->aq_fw_ops->get_eeprom == NULL)
1472                 return -ENOTSUP;
1473
1474         if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1475             eeprom->data == NULL)
1476                 return -EINVAL;
1477
1478         if (eeprom->magic > 0x7F)
1479                 return -EINVAL;
1480
1481         if (eeprom->magic)
1482                 dev_addr = eeprom->magic;
1483
1484         return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1485                                          eeprom->length, eeprom->offset);
1486 }
1487
1488 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1489                        struct rte_dev_eeprom_info *eeprom)
1490 {
1491         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1492         uint32_t dev_addr = SMBUS_DEVICE_ID;
1493
1494         if (hw->aq_fw_ops->set_eeprom == NULL)
1495                 return -ENOTSUP;
1496
1497         if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1498             eeprom->data == NULL)
1499                 return -EINVAL;
1500
1501         if (eeprom->magic > 0x7F)
1502                 return -EINVAL;
1503
1504         if (eeprom->magic)
1505                 dev_addr = eeprom->magic;
1506
1507         return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1508                                          eeprom->length, eeprom->offset);
1509 }
1510
/* Dump device registers; a NULL data pointer is a size query. */
static int
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	u32 mif_id;
	int err;

	/* Size query: report how many u32 registers a dump needs */
	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);
		return 0;
	}

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
		return -ENOTSUP;

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version: low byte of the global MIF identification reg */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;

	return err;
}
1536
1537 static int
1538 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1539 {
1540         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1541         u32 fc = AQ_NIC_FC_OFF;
1542
1543         if (hw->aq_fw_ops->get_flow_control == NULL)
1544                 return -ENOTSUP;
1545
1546         hw->aq_fw_ops->get_flow_control(hw, &fc);
1547
1548         if (fc == AQ_NIC_FC_OFF)
1549                 fc_conf->mode = RTE_FC_NONE;
1550         else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
1551                 fc_conf->mode = RTE_FC_FULL;
1552         else if (fc & AQ_NIC_FC_RX)
1553                 fc_conf->mode = RTE_FC_RX_PAUSE;
1554         else if (fc & AQ_NIC_FC_TX)
1555                 fc_conf->mode = RTE_FC_TX_PAUSE;
1556
1557         return 0;
1558 }
1559
1560 static int
1561 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1562 {
1563         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1564         uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1565
1566
1567         if (hw->aq_fw_ops->set_flow_control == NULL)
1568                 return -ENOTSUP;
1569
1570         if (fc_conf->mode == RTE_FC_NONE)
1571                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1572         else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1573                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1574         else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1575                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1576         else if (fc_conf->mode == RTE_FC_FULL)
1577                 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1578
1579         if (old_flow_control != hw->aq_nic_cfg->flow_control)
1580                 return hw->aq_fw_ops->set_flow_control(hw);
1581
1582         return 0;
1583 }
1584
1585 static int
1586 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1587                     u8 *mac_addr, bool enable)
1588 {
1589         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1590         unsigned int h = 0U;
1591         unsigned int l = 0U;
1592         int err;
1593
1594         if (mac_addr) {
1595                 h = (mac_addr[0] << 8) | (mac_addr[1]);
1596                 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1597                         (mac_addr[4] << 8) | mac_addr[5];
1598         }
1599
1600         hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1601         hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1602         hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1603
1604         if (enable)
1605                 hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1606
1607         err = aq_hw_err_from_flags(hw);
1608
1609         return err;
1610 }
1611
1612 static int
1613 atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1614                         uint32_t index __rte_unused, uint32_t pool __rte_unused)
1615 {
1616         if (rte_is_zero_ether_addr(mac_addr)) {
1617                 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1618                 return -EINVAL;
1619         }
1620
1621         return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1622 }
1623
static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	/* Zero the filter's address and leave the filter disabled */
	atl_update_mac_addr(dev, index, NULL, false);
}
1629
/*
 * Replace the default MAC address (unicast filter slot 0).
 *
 * Fix: the result of atl_add_mac_addr() was silently discarded and 0
 * returned unconditionally; a failed programming attempt (invalid
 * address or HW error) is now propagated to the caller.
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	return atl_add_mac_addr(dev, addr, 0, 0);
}
1637
1638 static int
1639 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1640 {
1641         struct rte_eth_dev_info dev_info;
1642         int ret;
1643         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1644
1645         ret = atl_dev_info_get(dev, &dev_info);
1646         if (ret != 0)
1647                 return ret;
1648
1649         if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1650                 return -EINVAL;
1651
1652         /* update max frame size */
1653         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1654
1655         return 0;
1656 }
1657
/* Add (on != 0) or remove (on == 0) a hardware VLAN filter for
 * vlan_id. Returns 0 on success, -ENOMEM when all filter slots are
 * occupied. VLAN promiscuous mode is (re)enabled whenever the filter
 * table ends up empty.
 */
static int
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int err = 0;
	int i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Look up vlan_id in the cached filter table; on removal the
	 * matching slot is disabled and cleared in the same pass.
	 */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			if (!on) {
				/* Disable VLAN filter. */
				hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

				/* Clear VLAN filter entry */
				cfg->vlan_filter[i] = 0;
			}
			break;
		}
	}

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
		goto exit;

	/* VLAN_ID already exists, or was just removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)
		goto exit;

	/* Try to find a free filter slot for the new VLAN_ID
	 * (slot value 0 marks a free entry).
	 */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)
			break;
	}

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID*/
		err = -ENOMEM;
		goto exit;
	}

	/* Program and enable the new filter */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

exit:
	/* Enable VLAN promisc mode if the filter table is empty */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)
			break;
	}

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);

	return err;
}
1718
1719 static int
1720 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1721 {
1722         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1723         struct aq_hw_cfg_s *cfg =
1724                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1725         int i;
1726
1727         PMD_INIT_FUNC_TRACE();
1728
1729         for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1730                 if (cfg->vlan_filter[i])
1731                         hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1732         }
1733         return 0;
1734 }
1735
/* Apply the requested VLAN offload mask (filter / strip / extend). */
static int
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int ret = 0;
	int i;

	PMD_INIT_FUNC_TRACE();

	/* Toggle the already-configured VLAN filters on or off */
	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);

	/* Cache the strip setting and apply it to every Rx queue */
	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);

	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	/* QinQ (extend) is unsupported; note this overrides any earlier
	 * return value from the filter update.
	 */
	if (mask & ETH_VLAN_EXTEND_MASK)
		ret = -ENOTSUP;

	return ret;
}
1759
1760 static int
1761 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1762                   uint16_t tpid)
1763 {
1764         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1765         int err = 0;
1766
1767         PMD_INIT_FUNC_TRACE();
1768
1769         switch (vlan_type) {
1770         case ETH_VLAN_TYPE_INNER:
1771                 hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1772                 break;
1773         case ETH_VLAN_TYPE_OUTER:
1774                 hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1775                 break;
1776         default:
1777                 PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1778                 err = -ENOTSUP;
1779         }
1780
1781         return err;
1782 }
1783
1784 static void
1785 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1786 {
1787         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1788
1789         PMD_INIT_FUNC_TRACE();
1790
1791         if (queue_id > dev->data->nb_rx_queues) {
1792                 PMD_DRV_LOG(ERR, "Invalid queue id");
1793                 return;
1794         }
1795
1796         hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1797 }
1798
1799 static int
1800 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1801                           struct rte_ether_addr *mc_addr_set,
1802                           uint32_t nb_mc_addr)
1803 {
1804         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1805         u32 i;
1806
1807         if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1808                 return -EINVAL;
1809
1810         /* Update whole uc filters table */
1811         for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1812                 u8 *mac_addr = NULL;
1813                 u32 l = 0, h = 0;
1814
1815                 if (i < nb_mc_addr) {
1816                         mac_addr = mc_addr_set[i].addr_bytes;
1817                         l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1818                                 (mac_addr[4] << 8) | mac_addr[5];
1819                         h = (mac_addr[0] << 8) | mac_addr[1];
1820                 }
1821
1822                 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1823                 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1824                                                         HW_ATL_B0_MAC_MIN + i);
1825                 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1826                                                         HW_ATL_B0_MAC_MIN + i);
1827                 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1828                                            HW_ATL_B0_MAC_MIN + i);
1829         }
1830
1831         return 0;
1832 }
1833
1834 static int
1835 atl_reta_update(struct rte_eth_dev *dev,
1836                    struct rte_eth_rss_reta_entry64 *reta_conf,
1837                    uint16_t reta_size)
1838 {
1839         int i;
1840         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1841         struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1842
1843         for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1844                 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1845                                         dev->data->nb_rx_queues - 1);
1846
1847         hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1848         return 0;
1849 }
1850
1851 static int
1852 atl_reta_query(struct rte_eth_dev *dev,
1853                     struct rte_eth_rss_reta_entry64 *reta_conf,
1854                     uint16_t reta_size)
1855 {
1856         int i;
1857         struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1858
1859         for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1860                 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1861         reta_conf->mask = ~0U;
1862         return 0;
1863 }
1864
1865 static int
1866 atl_rss_hash_update(struct rte_eth_dev *dev,
1867                                  struct rte_eth_rss_conf *rss_conf)
1868 {
1869         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1870         struct aq_hw_cfg_s *cfg =
1871                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1872         static u8 def_rss_key[40] = {
1873                 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1874                 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1875                 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1876                 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1877                 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1878         };
1879
1880         cfg->is_rss = !!rss_conf->rss_hf;
1881         if (rss_conf->rss_key) {
1882                 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1883                        rss_conf->rss_key_len);
1884                 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1885         } else {
1886                 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1887                        sizeof(def_rss_key));
1888                 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1889         }
1890
1891         hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1892         hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1893         return 0;
1894 }
1895
1896 static int
1897 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1898                                  struct rte_eth_rss_conf *rss_conf)
1899 {
1900         struct aq_hw_cfg_s *cfg =
1901                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1902
1903         rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1904         if (rss_conf->rss_key) {
1905                 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1906                 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1907                        rss_conf->rss_key_len);
1908         }
1909
1910         return 0;
1911 }
1912
1913 static bool
1914 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1915 {
1916         if (strcmp(dev->device->driver->name, drv->driver.name))
1917                 return false;
1918
1919         return true;
1920 }
1921
/* Return true when the given ethdev is bound to the atlantic PMD. */
bool
is_atlantic_supported(struct rte_eth_dev *dev)
{
	return is_device_supported(dev, &rte_atl_pmd);
}
1927
1928 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1929 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1930 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1931 RTE_LOG_REGISTER(atl_logtype_init, pmd.net.atlantic.init, NOTICE);
1932 RTE_LOG_REGISTER(atl_logtype_driver, pmd.net.atlantic.driver, NOTICE);