1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2018 Aquantia Corporation
3  */
4
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
7 #include <rte_alarm.h>
8
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
12 #include "atl_logs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
16
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int  atl_dev_configure(struct rte_eth_dev *dev);
19 static int  atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int  atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int  atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static int  atl_dev_close(struct rte_eth_dev *dev);
24 static int  atl_dev_reset(struct rte_eth_dev *dev);
25 static int  atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static int  atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int  atl_dev_link_update(struct rte_eth_dev *dev, int wait);
30
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32                                     struct rte_eth_xstat_name *xstats_names,
33                                     unsigned int size);
34
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36                                 struct rte_eth_stats *stats);
37
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39                               struct rte_eth_xstat *stats, unsigned int n);
40
41 static int atl_dev_stats_reset(struct rte_eth_dev *dev);
42
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
44                               size_t fw_size);
45
46 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
47
48 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
49
50 /* VLAN stuff */
51 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
52                 uint16_t vlan_id, int on);
53
54 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
55
56 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
57                                      uint16_t queue_id, int on);
58
59 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
60                              enum rte_vlan_type vlan_type, uint16_t tpid);
61
62 /* EEPROM */
63 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
64 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
65                               struct rte_dev_eeprom_info *eeprom);
66 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
67                               struct rte_dev_eeprom_info *eeprom);
68
69 /* Regs */
70 static int atl_dev_get_regs(struct rte_eth_dev *dev,
71                             struct rte_dev_reg_info *regs);
72
73 /* Flow control */
74 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
75                                struct rte_eth_fc_conf *fc_conf);
76 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
77                                struct rte_eth_fc_conf *fc_conf);
78
79 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
80
81 /* Interrupts */
82 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
83 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
84 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
85 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
86                                     struct rte_intr_handle *handle);
87 static void atl_dev_interrupt_handler(void *param);
88
89
90 static int atl_add_mac_addr(struct rte_eth_dev *dev,
91                             struct rte_ether_addr *mac_addr,
92                             uint32_t index, uint32_t pool);
93 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
94 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
95                                            struct rte_ether_addr *mac_addr);
96
97 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
98                                     struct rte_ether_addr *mc_addr_set,
99                                     uint32_t nb_mc_addr);
100
101 /* RSS */
102 static int atl_reta_update(struct rte_eth_dev *dev,
103                              struct rte_eth_rss_reta_entry64 *reta_conf,
104                              uint16_t reta_size);
105 static int atl_reta_query(struct rte_eth_dev *dev,
106                             struct rte_eth_rss_reta_entry64 *reta_conf,
107                             uint16_t reta_size);
108 static int atl_rss_hash_update(struct rte_eth_dev *dev,
109                                  struct rte_eth_rss_conf *rss_conf);
110 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
111                                    struct rte_eth_rss_conf *rss_conf);
112
113
114 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
115         struct rte_pci_device *pci_dev);
116 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
117
118 static int atl_dev_info_get(struct rte_eth_dev *dev,
119                                 struct rte_eth_dev_info *dev_info);
120
121 /*
122  * The set of PCI devices this driver supports
123  */
124 static const struct rte_pci_id pci_id_atl_map[] = {
125         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
126         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
127         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
128         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
129         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
130
131         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
132         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
133         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
134         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
135         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
136         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
137
138         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
139         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
140         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
141         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
142         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
143         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
144
145         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
146         { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
147         { .vendor_id = 0, /* sentinel */ },
148 };
149
150 static struct rte_pci_driver rte_atl_pmd = {
151         .id_table = pci_id_atl_map,
152         .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
153         .probe = eth_atl_pci_probe,
154         .remove = eth_atl_pci_remove,
155 };
156
157 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
158                         | DEV_RX_OFFLOAD_IPV4_CKSUM \
159                         | DEV_RX_OFFLOAD_UDP_CKSUM \
160                         | DEV_RX_OFFLOAD_TCP_CKSUM \
161                         | DEV_RX_OFFLOAD_JUMBO_FRAME \
162                         | DEV_RX_OFFLOAD_MACSEC_STRIP \
163                         | DEV_RX_OFFLOAD_VLAN_FILTER)
164
165 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
166                         | DEV_TX_OFFLOAD_IPV4_CKSUM \
167                         | DEV_TX_OFFLOAD_UDP_CKSUM \
168                         | DEV_TX_OFFLOAD_TCP_CKSUM \
169                         | DEV_TX_OFFLOAD_TCP_TSO \
170                         | DEV_TX_OFFLOAD_MACSEC_INSERT \
171                         | DEV_TX_OFFLOAD_MULTI_SEGS)
172
173 #define SFP_EEPROM_SIZE 0x100
174
175 static const struct rte_eth_desc_lim rx_desc_lim = {
176         .nb_max = ATL_MAX_RING_DESC,
177         .nb_min = ATL_MIN_RING_DESC,
178         .nb_align = ATL_RXD_ALIGN,
179 };
180
181 static const struct rte_eth_desc_lim tx_desc_lim = {
182         .nb_max = ATL_MAX_RING_DESC,
183         .nb_min = ATL_MIN_RING_DESC,
184         .nb_align = ATL_TXD_ALIGN,
185         .nb_seg_max = ATL_TX_MAX_SEG,
186         .nb_mtu_seg_max = ATL_TX_MAX_SEG,
187 };
188
189 enum atl_xstats_type {
190         XSTATS_TYPE_MSM = 0,
191         XSTATS_TYPE_MACSEC,
192 };
193
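/* Each xstats table entry records a counter name, its byte offset inside the
 * source structure (MSM HW stats or MACsec stats) and which of the two
 * structures the value is read from.
 */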
194 #define ATL_XSTATS_FIELD(name) { \
195         #name, \
196         offsetof(struct aq_stats_s, name), \
197         XSTATS_TYPE_MSM \
198 }
199
200 #define ATL_MACSEC_XSTATS_FIELD(name) { \
201         #name, \
202         offsetof(struct macsec_stats, name), \
203         XSTATS_TYPE_MACSEC \
204 }
205
206 struct atl_xstats_tbl_s {
207         const char *name;
208         unsigned int offset;
209         enum atl_xstats_type type;
210 };
211
212 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
213         ATL_XSTATS_FIELD(uprc),
214         ATL_XSTATS_FIELD(mprc),
215         ATL_XSTATS_FIELD(bprc),
216         ATL_XSTATS_FIELD(erpt),
217         ATL_XSTATS_FIELD(uptc),
218         ATL_XSTATS_FIELD(mptc),
219         ATL_XSTATS_FIELD(bptc),
220         ATL_XSTATS_FIELD(erpr),
221         ATL_XSTATS_FIELD(ubrc),
222         ATL_XSTATS_FIELD(ubtc),
223         ATL_XSTATS_FIELD(mbrc),
224         ATL_XSTATS_FIELD(mbtc),
225         ATL_XSTATS_FIELD(bbrc),
226         ATL_XSTATS_FIELD(bbtc),
227         /* Ingress Common Counters */
228         ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
229         ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
230         ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
231         ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
232         ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
233         ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
234         ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
235         ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
236         /* Ingress SA Counters */
237         ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
238         ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
239         ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
240         ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
241         ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
242         ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
243         ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
244         ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
245         ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
246         /* Egress Common Counters */
247         ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
248         ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
249         ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
250         ATL_MACSEC_XSTATS_FIELD(out_too_long),
251         /* Egress SC Counters */
252         ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
253         ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
254         /* Egress SA Counters */
255         ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
256         ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
257         ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
258         ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
259 };
260
261 static const struct eth_dev_ops atl_eth_dev_ops = {
262         .dev_configure        = atl_dev_configure,
263         .dev_start            = atl_dev_start,
264         .dev_stop             = atl_dev_stop,
265         .dev_set_link_up      = atl_dev_set_link_up,
266         .dev_set_link_down    = atl_dev_set_link_down,
267         .dev_close            = atl_dev_close,
268         .dev_reset            = atl_dev_reset,
269
270         /* PROMISC */
271         .promiscuous_enable   = atl_dev_promiscuous_enable,
272         .promiscuous_disable  = atl_dev_promiscuous_disable,
273         .allmulticast_enable  = atl_dev_allmulticast_enable,
274         .allmulticast_disable = atl_dev_allmulticast_disable,
275
276         /* Link */
277         .link_update          = atl_dev_link_update,
278
279         .get_reg              = atl_dev_get_regs,
280
281         /* Stats */
282         .stats_get            = atl_dev_stats_get,
283         .xstats_get           = atl_dev_xstats_get,
284         .xstats_get_names     = atl_dev_xstats_get_names,
285         .stats_reset          = atl_dev_stats_reset,
286         .xstats_reset         = atl_dev_stats_reset,
287
288         .fw_version_get       = atl_fw_version_get,
289         .dev_infos_get        = atl_dev_info_get,
290         .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
291
292         .mtu_set              = atl_dev_mtu_set,
293
294         /* VLAN */
295         .vlan_filter_set      = atl_vlan_filter_set,
296         .vlan_offload_set     = atl_vlan_offload_set,
297         .vlan_tpid_set        = atl_vlan_tpid_set,
298         .vlan_strip_queue_set = atl_vlan_strip_queue_set,
299
300         /* Queue Control */
301         .rx_queue_start       = atl_rx_queue_start,
302         .rx_queue_stop        = atl_rx_queue_stop,
303         .rx_queue_setup       = atl_rx_queue_setup,
304         .rx_queue_release     = atl_rx_queue_release,
305
306         .tx_queue_start       = atl_tx_queue_start,
307         .tx_queue_stop        = atl_tx_queue_stop,
308         .tx_queue_setup       = atl_tx_queue_setup,
309         .tx_queue_release     = atl_tx_queue_release,
310
311         .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
312         .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
313
314         /* EEPROM */
315         .get_eeprom_length    = atl_dev_get_eeprom_length,
316         .get_eeprom           = atl_dev_get_eeprom,
317         .set_eeprom           = atl_dev_set_eeprom,
318
319         /* Flow Control */
320         .flow_ctrl_get        = atl_flow_ctrl_get,
321         .flow_ctrl_set        = atl_flow_ctrl_set,
322
323         /* MAC */
324         .mac_addr_add         = atl_add_mac_addr,
325         .mac_addr_remove      = atl_remove_mac_addr,
326         .mac_addr_set         = atl_set_default_mac_addr,
327         .set_mc_addr_list     = atl_dev_set_mc_addr_list,
328         .rxq_info_get         = atl_rxq_info_get,
329         .txq_info_get         = atl_txq_info_get,
330
331         .reta_update          = atl_reta_update,
332         .reta_query           = atl_reta_query,
333         .rss_hash_update      = atl_rss_hash_update,
334         .rss_hash_conf_get    = atl_rss_hash_conf_get,
335 };
336
337 static inline int32_t
338 atl_reset_hw(struct aq_hw_s *hw)
339 {
340         return hw_atl_b0_hw_reset(hw);
341 }
342
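/* Interrupt mask helpers: writing all-ones to the LSW mask set/clear
 * registers unmasks or masks every interrupt cause in one shot.
 */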
343 static inline void
344 atl_enable_intr(struct rte_eth_dev *dev)
345 {
346         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
347
348         hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
349 }
350
351 static void
352 atl_disable_intr(struct aq_hw_s *hw)
353 {
354         PMD_INIT_FUNC_TRACE();
355         hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
356 }
357
358 static int
359 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
360 {
361         struct atl_adapter *adapter = eth_dev->data->dev_private;
362         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
363         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
364         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
365         int err = 0;
366
367         PMD_INIT_FUNC_TRACE();
368
369         eth_dev->dev_ops = &atl_eth_dev_ops;
370
371         eth_dev->rx_queue_count       = atl_rx_queue_count;
372         eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
373         eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;
374
375         eth_dev->rx_pkt_burst = &atl_recv_pkts;
376         eth_dev->tx_pkt_burst = &atl_xmit_pkts;
377         eth_dev->tx_pkt_prepare = &atl_prep_pkts;
378
379         /* For secondary processes, the primary process has done all the work */
380         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
381                 return 0;
382
383         /* Vendor and Device ID need to be set before init of shared code */
384         hw->device_id = pci_dev->id.device_id;
385         hw->vendor_id = pci_dev->id.vendor_id;
386         hw->mmio = (void *)pci_dev->mem_resource[0].addr;
387
388         /* Hardware configuration - hardcoded defaults */
389         adapter->hw_cfg.is_lro = false;
390         adapter->hw_cfg.wol = false;
391         adapter->hw_cfg.is_rss = false;
392         adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
393
394         adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
395                           AQ_NIC_RATE_5G |
396                           AQ_NIC_RATE_2G5 |
397                           AQ_NIC_RATE_1G |
398                           AQ_NIC_RATE_100M;
399
400         adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
401         adapter->hw_cfg.aq_rss.indirection_table_size =
402                 HW_ATL_B0_RSS_REDIRECTION_MAX;
403
404         hw->aq_nic_cfg = &adapter->hw_cfg;
405
406         pthread_mutex_init(&hw->mbox_mutex, NULL);
407
408         /* disable interrupt */
409         atl_disable_intr(hw);
410
411         /* Allocate memory for storing MAC addresses */
412         eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
413                                         RTE_ETHER_ADDR_LEN, 0);
414         if (eth_dev->data->mac_addrs == NULL) {
415                 PMD_INIT_LOG(ERR, "MAC Malloc failed");
416                 return -ENOMEM;
417         }
418
419         err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
420         if (err)
421                 return err;
422
423         /* Copy the permanent MAC address */
424         if (hw->aq_fw_ops->get_mac_permanent(hw,
425                         eth_dev->data->mac_addrs->addr_bytes) != 0)
426                 return -EINVAL;
427
428         /* Reset the hw statistics */
429         atl_dev_stats_reset(eth_dev);
430
431         rte_intr_callback_register(intr_handle,
432                                    atl_dev_interrupt_handler, eth_dev);
433
434         /* enable uio/vfio intr/eventfd mapping */
435         rte_intr_enable(intr_handle);
436
437         /* enable interrupt support */
438         atl_enable_intr(eth_dev);
439
440         return err;
441 }
442
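/* Probe/remove delegate to the generic ethdev PCI helpers: probe allocates
 * an ethdev with struct atl_adapter private data and runs eth_atl_dev_init;
 * remove tears the port down through atl_dev_close.
 */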
443 static int
444 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
445         struct rte_pci_device *pci_dev)
446 {
447         return rte_eth_dev_pci_generic_probe(pci_dev,
448                 sizeof(struct atl_adapter), eth_atl_dev_init);
449 }
450
451 static int
452 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
453 {
454         return rte_eth_dev_pci_generic_remove(pci_dev, atl_dev_close);
455 }
456
457 static int
458 atl_dev_configure(struct rte_eth_dev *dev)
459 {
460         struct atl_interrupt *intr =
461                 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
462
463         PMD_INIT_FUNC_TRACE();
464
465         /* set flag to update link status after init */
466         intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
467
468         return 0;
469 }
470
471 /*
472  * Configure device link speed and setup link.
473  * It returns 0 on success.
474  */
475 static int
476 atl_dev_start(struct rte_eth_dev *dev)
477 {
478         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
479         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
480         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
481         uint32_t intr_vector = 0;
482         int status;
483         int err;
484
485         PMD_INIT_FUNC_TRACE();
486
487         /* set adapter started */
488         hw->adapter_stopped = 0;
489
490         if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
491                 PMD_INIT_LOG(ERR,
492                 "Invalid link_speeds for port %u, fixed speed not supported",
493                                 dev->data->port_id);
494                 return -EINVAL;
495         }
496
497         /* disable uio/vfio intr/eventfd mapping */
498         rte_intr_disable(intr_handle);
499
500         /* reinitialize adapter
501          * this calls reset and start
502          */
503         status = atl_reset_hw(hw);
504         if (status != 0)
505                 return -EIO;
506
507         err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
508
509         hw_atl_b0_hw_start(hw);
510         /* check and configure queue intr-vector mapping */
511         if ((rte_intr_cap_multiple(intr_handle) ||
512             !RTE_ETH_DEV_SRIOV(dev).active) &&
513             dev->data->dev_conf.intr_conf.rxq != 0) {
514                 intr_vector = dev->data->nb_rx_queues;
515                 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
516                         PMD_INIT_LOG(ERR, "At most %d intr queues supported",
517                                         ATL_MAX_INTR_QUEUE_NUM);
518                         return -ENOTSUP;
519                 }
520                 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
521                         PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
522                         return -1;
523                 }
524         }
525
526         if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
527                 intr_handle->intr_vec = rte_zmalloc("intr_vec",
528                                     dev->data->nb_rx_queues * sizeof(int), 0);
529                 if (intr_handle->intr_vec == NULL) {
530                         PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
531                                      " intr_vec", dev->data->nb_rx_queues);
532                         return -ENOMEM;
533                 }
534         }
535
536         /* initialize transmission unit */
537         atl_tx_init(dev);
538
539         /* This can fail when allocating mbufs for descriptor rings */
540         err = atl_rx_init(dev);
541         if (err) {
542                 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
543                 goto error;
544         }
545
546         PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
547                 hw->fw_ver_actual >> 24,
548                 (hw->fw_ver_actual >> 16) & 0xFF,
549                 hw->fw_ver_actual & 0xFFFF);
550         PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
551
552         err = atl_start_queues(dev);
553         if (err < 0) {
554                 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
555                 goto error;
556         }
557
558         err = atl_dev_set_link_up(dev);
559
560         err = hw->aq_fw_ops->update_link_status(hw);
561
562         if (err)
563                 goto error;
564
565         dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
566
567         if (rte_intr_allow_others(intr_handle)) {
568                 /* check if lsc interrupt is enabled */
569                 if (dev->data->dev_conf.intr_conf.lsc != 0)
570                         atl_dev_lsc_interrupt_setup(dev, true);
571                 else
572                         atl_dev_lsc_interrupt_setup(dev, false);
573         } else {
574                 rte_intr_callback_unregister(intr_handle,
575                                              atl_dev_interrupt_handler, dev);
576                 if (dev->data->dev_conf.intr_conf.lsc != 0)
577                         PMD_INIT_LOG(INFO, "LSC won't be enabled because"
578                                      " interrupt multiplexing is not available");
579         }
580
581         /* check if rxq interrupt is enabled */
582         if (dev->data->dev_conf.intr_conf.rxq != 0 &&
583             rte_intr_dp_is_en(intr_handle))
584                 atl_dev_rxq_interrupt_setup(dev);
585
586         /* enable uio/vfio intr/eventfd mapping */
587         rte_intr_enable(intr_handle);
588
589         /* re-enable interrupts after the HW reset */
590         atl_enable_intr(dev);
591
592         return 0;
593
594 error:
595         atl_stop_queues(dev);
596         return -EIO;
597 }
598
599 /*
600  * Stop device: disable rx and tx functions to allow for reconfiguring.
601  */
602 static void
603 atl_dev_stop(struct rte_eth_dev *dev)
604 {
605         struct rte_eth_link link;
606         struct aq_hw_s *hw =
607                 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
608         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
609         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
610
611         PMD_INIT_FUNC_TRACE();
612         dev->data->dev_started = 0;
613
614         /* disable interrupts */
615         atl_disable_intr(hw);
616
617         /* reset the NIC */
618         atl_reset_hw(hw);
619         hw->adapter_stopped = 1;
620
621         atl_stop_queues(dev);
622
623         /* Clear stored conf */
624         dev->data->scattered_rx = 0;
625         dev->data->lro = 0;
626
627         /* Clear recorded link status */
628         memset(&link, 0, sizeof(link));
629         rte_eth_linkstatus_set(dev, &link);
630
631         if (!rte_intr_allow_others(intr_handle))
632                 /* restore the default interrupt handler */
633                 rte_intr_callback_register(intr_handle,
634                                            atl_dev_interrupt_handler,
635                                            (void *)dev);
636
637         /* Clean datapath event and queue/vec mapping */
638         rte_intr_efd_disable(intr_handle);
639         if (intr_handle->intr_vec != NULL) {
640                 rte_free(intr_handle->intr_vec);
641                 intr_handle->intr_vec = NULL;
642         }
643 }
644
645 /*
646  * Set device link up: enable tx.
647  */
648 static int
649 atl_dev_set_link_up(struct rte_eth_dev *dev)
650 {
651         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
652         uint32_t link_speeds = dev->data->dev_conf.link_speeds;
653         uint32_t speed_mask = 0;
654
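        /* Translate the ethdev link_speeds bitmap into the firmware rate
         * mask; autoneg requests every rate allowed by the NIC config.
         */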
655         if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
656                 speed_mask = hw->aq_nic_cfg->link_speed_msk;
657         } else {
658                 if (link_speeds & ETH_LINK_SPEED_10G)
659                         speed_mask |= AQ_NIC_RATE_10G;
660                 if (link_speeds & ETH_LINK_SPEED_5G)
661                         speed_mask |= AQ_NIC_RATE_5G;
662                 if (link_speeds & ETH_LINK_SPEED_1G)
663                         speed_mask |= AQ_NIC_RATE_1G;
664                 if (link_speeds & ETH_LINK_SPEED_2_5G)
665                         speed_mask |=  AQ_NIC_RATE_2G5;
666                 if (link_speeds & ETH_LINK_SPEED_100M)
667                         speed_mask |= AQ_NIC_RATE_100M;
668         }
669
670         return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
671 }
672
673 /*
674  * Set device link down: disable tx.
675  */
676 static int
677 atl_dev_set_link_down(struct rte_eth_dev *dev)
678 {
679         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
680
681         return hw->aq_fw_ops->set_link_speed(hw, 0);
682 }
683
684 /*
685  * Reset and stop device.
686  */
687 static int
688 atl_dev_close(struct rte_eth_dev *dev)
689 {
690         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
691         struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
692         struct aq_hw_s *hw;
693
694         PMD_INIT_FUNC_TRACE();
695
696         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
697                 return 0;
698
699         hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
700
701         atl_dev_stop(dev);
702
703         atl_free_queues(dev);
704
705         /* disable uio intr before callback unregister */
706         rte_intr_disable(intr_handle);
707         rte_intr_callback_unregister(intr_handle,
708                                      atl_dev_interrupt_handler, dev);
709
710         pthread_mutex_destroy(&hw->mbox_mutex);
711
712         return 0;
713 }
714
715 static int
716 atl_dev_reset(struct rte_eth_dev *dev)
717 {
718         int ret;
719
720         ret = atl_dev_close(dev);
721         if (ret)
722                 return ret;
723
724         ret = eth_atl_dev_init(dev);
725
726         return ret;
727 }
728
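/* Push the cached MACsec configuration to firmware as a sequence of mailbox
 * requests: enable MACsec, then add the TX SC, the RX SC, the TX SA and
 * finally the RX SA. Any non-zero response aborts the sequence.
 */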
729 static int
730 atl_dev_configure_macsec(struct rte_eth_dev *dev)
731 {
732         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
733         struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
734         struct aq_macsec_config *aqcfg = &cf->aq_macsec;
735         struct macsec_msg_fw_request msg_macsec;
736         struct macsec_msg_fw_response response;
737
738         if (!aqcfg->common.macsec_enabled ||
739             hw->aq_fw_ops->send_macsec_req == NULL)
740                 return 0;
741
742         memset(&msg_macsec, 0, sizeof(msg_macsec));
743
744         /* Creating set of sc/sa structures from parameters provided by DPDK */
745
746         /* Configure macsec */
747         msg_macsec.msg_type = macsec_cfg_msg;
748         msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
749         msg_macsec.cfg.interrupts_enabled = 1;
750
751         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
752
753         if (response.result)
754                 return -1;
755
756         memset(&msg_macsec, 0, sizeof(msg_macsec));
757
758         /* Configure TX SC */
759
760         msg_macsec.msg_type = macsec_add_tx_sc_msg;
761         msg_macsec.txsc.index = 0; /* TXSC always one (??) */
762         msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
763
764         /* MAC addr for TX */
765         msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
766         msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
767         msg_macsec.txsc.sa_mask = 0x3f;
768
769         msg_macsec.txsc.da_mask = 0;
770         msg_macsec.txsc.tci = 0x0B;
771         msg_macsec.txsc.curr_an = 0; /* SA index currently in use */
772
773         /*
774          * Creating SCI (Secure Channel Identifier).
775          * SCI constructed from Source MAC and Port identifier
776          */
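        /* The 64-bit SCI is the 48-bit source MAC followed by the 16-bit
         * port identifier, packed into two 32-bit words (sci[1] is the
         * upper half).
         */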
777         uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
778                                (msg_macsec.txsc.mac_sa[0] >> 16);
779         uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
780
781         uint32_t port_identifier = 1;
782
783         msg_macsec.txsc.sci[1] = sci_hi_part;
784         msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
785
786         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
787
788         if (response.result)
789                 return -1;
790
791         memset(&msg_macsec, 0, sizeof(msg_macsec));
792
793         /* Configure RX SC */
794
795         msg_macsec.msg_type = macsec_add_rx_sc_msg;
796         msg_macsec.rxsc.index = aqcfg->rxsc.pi;
797         msg_macsec.rxsc.replay_protect =
798                 aqcfg->common.replay_protection_enabled;
799         msg_macsec.rxsc.anti_replay_window = 0;
800
801         /* MAC addr for RX */
802         msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
803         msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
804         msg_macsec.rxsc.da_mask = 0; /* 0x3f */
805
806         msg_macsec.rxsc.sa_mask = 0;
807
808         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
809
810         if (response.result)
811                 return -1;
812
813         memset(&msg_macsec, 0, sizeof(msg_macsec));
814
815         /* Configure TX SA */
816
817         msg_macsec.msg_type = macsec_add_tx_sa_msg;
818         msg_macsec.txsa.index = aqcfg->txsa.idx;
819         msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
820
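        /* Firmware expects the 128-bit key with both the 32-bit word order
         * and the byte order within each word reversed relative to the
         * caller-supplied key buffer.
         */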
821         msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
822         msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
823         msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
824         msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
825
826         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
827
828         if (response.result)
829                 return -1;
830
831         memset(&msg_macsec, 0, sizeof(msg_macsec));
832
833         /* Configure RX SA */
834
835         msg_macsec.msg_type = macsec_add_rx_sa_msg;
836         msg_macsec.rxsa.index = aqcfg->rxsa.idx;
837         msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
838
839         msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
840         msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
841         msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
842         msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
843
844         hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
845
846         if (response.result)
847                 return -1;
848
849         return 0;
850 }
851
852 int atl_macsec_enable(struct rte_eth_dev *dev,
853                       uint8_t encr, uint8_t repl_prot)
854 {
855         struct aq_hw_cfg_s *cfg =
856                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
857
858         cfg->aq_macsec.common.macsec_enabled = 1;
859         cfg->aq_macsec.common.encryption_enabled = encr;
860         cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
861
862         return 0;
863 }
864
865 int atl_macsec_disable(struct rte_eth_dev *dev)
866 {
867         struct aq_hw_cfg_s *cfg =
868                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
869
870         cfg->aq_macsec.common.macsec_enabled = 0;
871
872         return 0;
873 }
874
875 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
876 {
877         struct aq_hw_cfg_s *cfg =
878                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
879
880         memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
881         memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
882                 RTE_ETHER_ADDR_LEN);
883
884         return 0;
885 }
886
887 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
888                            uint8_t *mac, uint16_t pi)
889 {
890         struct aq_hw_cfg_s *cfg =
891                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
892
893         memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
894         memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
895                 RTE_ETHER_ADDR_LEN);
896         cfg->aq_macsec.rxsc.pi = pi;
897
898         return 0;
899 }
900
901 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
902                            uint8_t idx, uint8_t an,
903                            uint32_t pn, uint8_t *key)
904 {
905         struct aq_hw_cfg_s *cfg =
906                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
907
908         cfg->aq_macsec.txsa.idx = idx;
909         cfg->aq_macsec.txsa.pn = pn;
910         cfg->aq_macsec.txsa.an = an;
911
912         memcpy(&cfg->aq_macsec.txsa.key, key, 16);
913         return 0;
914 }
915
916 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
917                            uint8_t idx, uint8_t an,
918                            uint32_t pn, uint8_t *key)
919 {
920         struct aq_hw_cfg_s *cfg =
921                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
922
923         cfg->aq_macsec.rxsa.idx = idx;
924         cfg->aq_macsec.rxsa.pn = pn;
925         cfg->aq_macsec.rxsa.an = an;
926
927         memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
928         return 0;
929 }
930
931 static int
932 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
933 {
934         struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
935         struct aq_hw_s *hw = &adapter->hw;
936         struct atl_sw_stats *swstats = &adapter->sw_stats;
937         unsigned int i;
938
939         hw->aq_fw_ops->update_stats(hw);
940
941         /* Fill out the rte_eth_stats statistics structure */
942         stats->ipackets = hw->curr_stats.dma_pkt_rc;
943         stats->ibytes = hw->curr_stats.dma_oct_rc;
944         stats->imissed = hw->curr_stats.dpc;
945         stats->ierrors = hw->curr_stats.erpt;
946
947         stats->opackets = hw->curr_stats.dma_pkt_tc;
948         stats->obytes = hw->curr_stats.dma_oct_tc;
949         stats->oerrors = 0;
950
951         stats->rx_nombuf = swstats->rx_nombuf;
952
953         for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
954                 stats->q_ipackets[i] = swstats->q_ipackets[i];
955                 stats->q_opackets[i] = swstats->q_opackets[i];
956                 stats->q_ibytes[i] = swstats->q_ibytes[i];
957                 stats->q_obytes[i] = swstats->q_obytes[i];
958                 stats->q_errors[i] = swstats->q_errors[i];
959         }
960         return 0;
961 }
962
963 static int
964 atl_dev_stats_reset(struct rte_eth_dev *dev)
965 {
966         struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
967         struct aq_hw_s *hw = &adapter->hw;
968
969         hw->aq_fw_ops->update_stats(hw);
970
971         /* Reset software totals */
972         memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
973
974         memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
975
976         return 0;
977 }
978
979 static int
980 atl_dev_xstats_get_count(struct rte_eth_dev *dev)
981 {
982         struct atl_adapter *adapter =
983                 (struct atl_adapter *)dev->data->dev_private;
984
985         struct aq_hw_s *hw = &adapter->hw;
986         unsigned int i, count = 0;
987
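        /* Count only the counters this device can actually report: MACsec
         * entries are skipped when the MACsec capability bit is not set.
         */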
988         for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
989                 if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
990                         ((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
991                         continue;
992
993                 count++;
994         }
995
996         return count;
997 }
998
999 static int
1000 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1001                          struct rte_eth_xstat_name *xstats_names,
1002                          unsigned int size)
1003 {
1004         unsigned int i;
1005         unsigned int count = atl_dev_xstats_get_count(dev);
1006
1007         if (xstats_names) {
1008                 for (i = 0; i < size && i < count; i++) {
1009                         snprintf(xstats_names[i].name,
1010                                 RTE_ETH_XSTATS_NAME_SIZE, "%s",
1011                                 atl_xstats_tbl[i].name);
1012                 }
1013         }
1014
1015         return count;
1016 }
1017
1018 static int
1019 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1020                    unsigned int n)
1021 {
1022         struct atl_adapter *adapter = dev->data->dev_private;
1023         struct aq_hw_s *hw = &adapter->hw;
1024         struct get_stats req = { 0 };
1025         struct macsec_msg_fw_request msg = { 0 };
1026         struct macsec_msg_fw_response resp = { 0 };
1027         int err = -1;
1028         unsigned int i;
1029         unsigned int count = atl_dev_xstats_get_count(dev);
1030
1031         if (!stats)
1032                 return count;
1033
1034         if (hw->aq_fw_ops->send_macsec_req != NULL) {
1035                 req.ingress_sa_index = 0xff;
1036                 req.egress_sc_index = 0xff;
1037                 req.egress_sa_index = 0xff;
1038
1039                 msg.msg_type = macsec_get_stats_msg;
1040                 msg.stats = req;
1041
1042                 err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1043         }
1044
1045         for (i = 0; i < n && i < count; i++) {
1046                 stats[i].id = i;
1047
1048                 switch (atl_xstats_tbl[i].type) {
1049                 case XSTATS_TYPE_MSM:
1050                         stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
1051                                          atl_xstats_tbl[i].offset);
1052                         break;
1053                 case XSTATS_TYPE_MACSEC:
1054                         if (!err) {
1055                                 stats[i].value =
1056                                         *(u64 *)((uint8_t *)&resp.stats +
1057                                         atl_xstats_tbl[i].offset);
1058                         }
1059                         break;
1060                 }
1061         }
1062
1063         return i;
1064 }
1065
1066 static int
1067 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1068 {
1069         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1070         uint32_t fw_ver = 0;
1071         unsigned int ret = 0;
1072
1073         ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1074         if (ret)
1075                 return -EIO;
1076
1077         ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1078                        (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1079
1080         ret += 1; /* account for the string null-terminator */
1081
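        /* Per the ethdev fw_version_get contract, return the size needed to
         * store the full string when the caller's buffer is too small;
         * 0 means the version string was written successfully.
         */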
1082         if (fw_size < ret)
1083                 return ret;
1084
1085         return 0;
1086 }
1087
1088 static int
1089 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1090 {
1091         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1092
1093         dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1094         dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1095
1096         dev_info->min_rx_bufsize = 1024;
1097         dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1098         dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1099         dev_info->max_vfs = pci_dev->max_vfs;
1100
1101         dev_info->max_hash_mac_addrs = 0;
1102         dev_info->max_vmdq_pools = 0;
1103         dev_info->vmdq_queue_num = 0;
1104
1105         dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1106
1107         dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1108
1109
1110         dev_info->default_rxconf = (struct rte_eth_rxconf) {
1111                 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1112         };
1113
1114         dev_info->default_txconf = (struct rte_eth_txconf) {
1115                 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1116         };
1117
1118         dev_info->rx_desc_lim = rx_desc_lim;
1119         dev_info->tx_desc_lim = tx_desc_lim;
1120
1121         dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1122         dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1123         dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1124
1125         dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1126         dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1127         dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1128         dev_info->speed_capa |= ETH_LINK_SPEED_5G;
1129
1130         return 0;
1131 }
1132
1133 static const uint32_t *
1134 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1135 {
1136         static const uint32_t ptypes[] = {
1137                 RTE_PTYPE_L2_ETHER,
1138                 RTE_PTYPE_L2_ETHER_ARP,
1139                 RTE_PTYPE_L2_ETHER_VLAN,
1140                 RTE_PTYPE_L3_IPV4,
1141                 RTE_PTYPE_L3_IPV6,
1142                 RTE_PTYPE_L4_TCP,
1143                 RTE_PTYPE_L4_UDP,
1144                 RTE_PTYPE_L4_SCTP,
1145                 RTE_PTYPE_L4_ICMP,
1146                 RTE_PTYPE_UNKNOWN
1147         };
1148
1149         if (dev->rx_pkt_burst == atl_recv_pkts)
1150                 return ptypes;
1151
1152         return NULL;
1153 }
1154
1155 static void
1156 atl_dev_delayed_handler(void *param)
1157 {
1158         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1159
1160         atl_dev_configure_macsec(dev);
1161 }
1162
1163
1164 /* Return 0 if the link status changed, -1 if it did not change */
1165 static int
1166 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
1167 {
1168         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1169         struct rte_eth_link link, old;
1170         u32 fc = AQ_NIC_FC_OFF;
1171         int err = 0;
1172
1173         link.link_status = ETH_LINK_DOWN;
1174         link.link_speed = 0;
1175         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1176         link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1177         memset(&old, 0, sizeof(old));
1178
1179         /* load old link status */
1180         rte_eth_linkstatus_get(dev, &old);
1181
1182         /* read current link status */
1183         err = hw->aq_fw_ops->update_link_status(hw);
1184
1185         if (err)
1186                 return 0;
1187
1188         if (hw->aq_link_status.mbps == 0) {
1189                 /* write default (down) link status */
1190                 rte_eth_linkstatus_set(dev, &link);
1191                 if (link.link_status == old.link_status)
1192                         return -1;
1193                 return 0;
1194         }
1195
1196         link.link_status = ETH_LINK_UP;
1197         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1198         link.link_speed = hw->aq_link_status.mbps;
1199
1200         rte_eth_linkstatus_set(dev, &link);
1201
1202         if (link.link_status == old.link_status)
1203                 return -1;
1204
1205         /* Driver has to update flow control settings on RX block
1206          * on any link event.
1207          * We should query FW whether it negotiated FC.
1208          */
1209         if (hw->aq_fw_ops->get_flow_control) {
1210                 hw->aq_fw_ops->get_flow_control(hw, &fc);
1211                 hw_atl_b0_set_fc(hw, fc, 0U);
1212         }
1213
1214         if (rte_eal_alarm_set(1000 * 1000,
1215                               atl_dev_delayed_handler, (void *)dev) < 0)
1216                 PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");
1217
1218         return 0;
1219 }
1220
1221 static int
1222 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1223 {
1224         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1225
1226         hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1227
1228         return 0;
1229 }
1230
1231 static int
1232 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1233 {
1234         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1235
1236         hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1237
1238         return 0;
1239 }
1240
1241 static int
1242 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1243 {
1244         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1245
1246         hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1247
1248         return 0;
1249 }
1250
1251 static int
1252 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1253 {
1254         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1255
1256         if (dev->data->promiscuous == 1)
1257                 return 0; /* must remain in all_multicast mode */
1258
1259         hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1260
1261         return 0;
1262 }
1263
1264 /**
1265  * It clears the interrupt causes and enables the interrupt.
1266  * It will be called only once during NIC initialization.
1267  *
1268  * @param dev
1269  *  Pointer to struct rte_eth_dev.
1270  * @param on
1271  *  Enable or Disable.
1272  *
1273  * @return
1274  *  - On success, zero.
1275  *  - On failure, a negative value.
1276  */
1277
1278 static int
1279 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
1280 {
1281         atl_dev_link_status_print(dev);
1282         return 0;
1283 }
1284
1285 static int
1286 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
1287 {
1288         return 0;
1289 }
1290
1291
1292 static int
1293 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1294 {
1295         struct atl_interrupt *intr =
1296                 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1297         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1298         u64 cause = 0;
1299
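        /* Latch the pending interrupt causes, mask further interrupts and,
         * if the link cause bit fired, flag that a link status update is
         * needed so the interrupt action handler will process it.
         */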
1300         hw_atl_b0_hw_irq_read(hw, &cause);
1301
1302         atl_disable_intr(hw);
1303
1304         if (cause & BIT(ATL_IRQ_CAUSE_LINK))
1305                 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
1306
1307         return 0;
1308 }
1309
1310 /**
1311  * It gets and then prints the link status.
1312  *
1313  * @param dev
1314  *  Pointer to struct rte_eth_dev.
1315  *
1316  * @return
1317  *  void
1319  */
1320 static void
1321 atl_dev_link_status_print(struct rte_eth_dev *dev)
1322 {
1323         struct rte_eth_link link;
1324
1325         memset(&link, 0, sizeof(link));
1326         rte_eth_linkstatus_get(dev, &link);
1327         if (link.link_status) {
1328                 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1329                                         (int)(dev->data->port_id),
1330                                         (unsigned int)link.link_speed,
1331                         link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1332                                         "full-duplex" : "half-duplex");
1333         } else {
1334                 PMD_DRV_LOG(INFO, "Port %d: Link Down",
1335                                 (int)(dev->data->port_id));
1336         }
1337
1338
1339 #ifdef DEBUG
1340 {
1341         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1342
1343         PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1344                                 pci_dev->addr.domain,
1345                                 pci_dev->addr.bus,
1346                                 pci_dev->addr.devid,
1347                                 pci_dev->addr.function);
1348 }
1349 #endif
1350
1351         PMD_DRV_LOG(INFO, "Link speed: %u", link.link_speed);
1352 }
1353
1354 /*
1355  * It executes link_update after an interrupt has occurred.
1356  *
1357  * @param dev
1358  *  Pointer to struct rte_eth_dev.
1359  *
1360  * @return
1361  *  - On success, zero.
1362  *  - On failure, a negative value.
1363  */
1364 static int
1365 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1366                            struct rte_intr_handle *intr_handle)
1367 {
1368         struct atl_interrupt *intr =
1369                 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1370         struct atl_adapter *adapter = dev->data->dev_private;
1371         struct aq_hw_s *hw = &adapter->hw;
1372
1373         if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
1374                 goto done;
1375
1376         intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1377
1378         /* Notify userapp if link status changed */
1379         if (!atl_dev_link_update(dev, 0)) {
1380                 atl_dev_link_status_print(dev);
1381                 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1382         } else {
1383                 if (hw->aq_fw_ops->send_macsec_req == NULL)
1384                         goto done;
1385
1386                 /* Check macsec Keys expired */
1387                 struct get_stats req = { 0 };
1388                 struct macsec_msg_fw_request msg = { 0 };
1389                 struct macsec_msg_fw_response resp = { 0 };
1390
1391                 req.ingress_sa_index = 0x0;
1392                 req.egress_sc_index = 0x0;
1393                 req.egress_sa_index = 0x0;
1394                 msg.msg_type = macsec_get_stats_msg;
1395                 msg.stats = req;
1396
1397                 int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1398                 if (err) {
1399                         PMD_DRV_LOG(ERR, "send_macsec_req fail");
1400                         goto done;
1401                 }
1402                 if (resp.stats.egress_threshold_expired ||
1403                     resp.stats.ingress_threshold_expired ||
1404                     resp.stats.egress_expired ||
1405                     resp.stats.ingress_expired) {
1406                         PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
1407                         rte_eth_dev_callback_process(dev,
1408                                 RTE_ETH_EVENT_MACSEC, NULL);
1409                 }
1410         }
1411 done:
1412         atl_enable_intr(dev);
1413         rte_intr_ack(intr_handle);
1414
1415         return 0;
1416 }
1417
1418 /**
1419  * Interrupt handler triggered by the NIC for handling
1420  * specific interrupt.
1421  *
1422  * @param handle
1423  *  Pointer to interrupt handle.
1424  * @param param
1425  *  The address of the parameter (struct rte_eth_dev *) registered before.
1426  *
1427  * @return
1428  *  void
1429  */
1430 static void
1431 atl_dev_interrupt_handler(void *param)
1432 {
1433         struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1434
1435         atl_dev_interrupt_get_status(dev);
1436         atl_dev_interrupt_action(dev, dev->intr_handle);
1437 }
1438
1439
1440 static int
1441 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1442 {
1443         return SFP_EEPROM_SIZE;
1444 }
1445
1446 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1447                        struct rte_dev_eeprom_info *eeprom)
1448 {
1449         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1450         uint32_t dev_addr = SMBUS_DEVICE_ID;
1451
1452         if (hw->aq_fw_ops->get_eeprom == NULL)
1453                 return -ENOTSUP;
1454
1455         if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1456             eeprom->data == NULL)
1457                 return -EINVAL;
1458
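        /* The 'magic' field is reused to carry an alternative 7-bit SMBus
         * device address; when it is zero the default device address
         * (SMBUS_DEVICE_ID) is used.
         */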
1459         if (eeprom->magic > 0x7F)
1460                 return -EINVAL;
1461
1462         if (eeprom->magic)
1463                 dev_addr = eeprom->magic;
1464
1465         return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1466                                          eeprom->length, eeprom->offset);
1467 }
1468
1469 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1470                        struct rte_dev_eeprom_info *eeprom)
1471 {
1472         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1473         uint32_t dev_addr = SMBUS_DEVICE_ID;
1474
1475         if (hw->aq_fw_ops->set_eeprom == NULL)
1476                 return -ENOTSUP;
1477
1478         if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1479             eeprom->data == NULL)
1480                 return -EINVAL;
1481
1482         if (eeprom->magic > 0x7F)
1483                 return -EINVAL;
1484
1485         if (eeprom->magic)
1486                 dev_addr = eeprom->magic;
1487
1488         return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1489                                          eeprom->length, eeprom->offset);
1490 }
1491
1492 static int
1493 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1494 {
1495         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1496         u32 mif_id;
1497         int err;
1498
1499         if (regs->data == NULL) {
1500                 regs->length = hw_atl_utils_hw_get_reg_length();
1501                 regs->width = sizeof(u32);
1502                 return 0;
1503         }
1504
1505         /* Only full register dump is supported */
1506         if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1507                 return -ENOTSUP;
1508
1509         err = hw_atl_utils_hw_get_regs(hw, regs->data);
1510
1511         /* Device version */
1512         mif_id = hw_atl_reg_glb_mif_id_get(hw);
1513         regs->version = mif_id & 0xFFU;
1514
1515         return err;
1516 }
1517
1518 static int
1519 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1520 {
1521         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1522         u32 fc = AQ_NIC_FC_OFF;
1523
1524         if (hw->aq_fw_ops->get_flow_control == NULL)
1525                 return -ENOTSUP;
1526
1527         hw->aq_fw_ops->get_flow_control(hw, &fc);
1528
1529         if (fc == AQ_NIC_FC_OFF)
1530                 fc_conf->mode = RTE_FC_NONE;
1531         else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
1532                 fc_conf->mode = RTE_FC_FULL;
1533         else if (fc & AQ_NIC_FC_RX)
1534                 fc_conf->mode = RTE_FC_RX_PAUSE;
1535         else if (fc & AQ_NIC_FC_TX)
1536                 fc_conf->mode = RTE_FC_TX_PAUSE;
1537
1538         return 0;
1539 }
1540
1541 static int
1542 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1543 {
1544         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1545         uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1546
1547
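        /* Map the requested RTE_FC_* mode onto the AQ_NIC_FC_RX/TX flags and
         * push the change to firmware only if it differs from the cached
         * configuration.
         */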
1548         if (hw->aq_fw_ops->set_flow_control == NULL)
1549                 return -ENOTSUP;
1550
1551         if (fc_conf->mode == RTE_FC_NONE)
1552                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1553         else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1554                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1555         else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1556                 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1557         else if (fc_conf->mode == RTE_FC_FULL)
1558                 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1559
1560         if (old_flow_control != hw->aq_nic_cfg->flow_control)
1561                 return hw->aq_fw_ops->set_flow_control(hw);
1562
1563         return 0;
1564 }
1565
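/*
 * Program the L2 unicast filter at 'index' with the given MAC address
 * and enable or disable the entry.
 */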
1566 static int
1567 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1568                     u8 *mac_addr, bool enable)
1569 {
1570         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1571         unsigned int h = 0U;
1572         unsigned int l = 0U;
1573         int err;
1574
1575         if (mac_addr) {
1576                 h = (mac_addr[0] << 8) | (mac_addr[1]);
1577                 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1578                         (mac_addr[4] << 8) | mac_addr[5];
1579         }
1580
1581         hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1582         hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1583         hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1584
1585         if (enable)
1586                 hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1587
1588         err = aq_hw_err_from_flags(hw);
1589
1590         return err;
1591 }
1592
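/* Add a unicast MAC address at the given filter index; all-zero addresses are rejected */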
1593 static int
1594 atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1595                         uint32_t index, uint32_t pool __rte_unused)
1596 {
1597         if (rte_is_zero_ether_addr(mac_addr)) {
1598                 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1599                 return -EINVAL;
1600         }
1601
1602         return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
1603 }
1604
1605 static void
1606 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1607 {
1608         atl_update_mac_addr(dev, index, NULL, false);
1609 }
1610
1611 static int
1612 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
1613 {
1614         atl_remove_mac_addr(dev, 0);
1615         /* Propagate failures from programming the new default MAC */
1616         return atl_add_mac_addr(dev, addr, 0, 0);
1617 }
1618
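/*
 * Validate the requested MTU against the device limits and update the
 * maximum RX frame size accordingly.
 */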
1619 static int
1620 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1621 {
1622         struct rte_eth_dev_info dev_info;
1623         int ret;
1624         uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1625
1626         ret = atl_dev_info_get(dev, &dev_info);
1627         if (ret != 0)
1628                 return ret;
1629
1630         if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1631                 return -EINVAL;
1632
1633         /* Update the maximum frame size */
1634         dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1635
1636         return 0;
1637 }
1638
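/*
 * Add or remove a VLAN ID in the hardware VLAN filter table. VLAN
 * promiscuous mode is enabled whenever the table ends up empty.
 */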
1639 static int
1640 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1641 {
1642         struct aq_hw_cfg_s *cfg =
1643                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1644         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1645         int err = 0;
1646         int i = 0;
1647
1648         PMD_INIT_FUNC_TRACE();
1649
1650         for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1651                 if (cfg->vlan_filter[i] == vlan_id) {
1652                         if (!on) {
1653                                 /* Disable VLAN filter. */
1654                                 hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1655
1656                                 /* Clear VLAN filter entry */
1657                                 cfg->vlan_filter[i] = 0;
1658                         }
1659                         break;
1660                 }
1661         }
1662
1663         /* VLAN_ID was not found, so there is nothing to delete */
1664         if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1665                 goto exit;
1666
1667         /* VLAN_ID already exists, or was removed above. Nothing to do. */
1668         if (i != HW_ATL_B0_MAX_VLAN_IDS)
1669                 goto exit;
1670
1671         /* Try to find a free VLAN filter slot for the new VLAN_ID */
1672         for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1673                 if (cfg->vlan_filter[i] == 0)
1674                         break;
1675         }
1676
1677         if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1678                 /* No free VLAN filter slot is left for the new VLAN_ID */
1679                 err = -ENOMEM;
1680                 goto exit;
1681         }
1682
1683         cfg->vlan_filter[i] = vlan_id;
1684         hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1685         hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1686         hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1687
1688 exit:
1689         /* Enable VLAN promiscuous mode if the VLAN filter table is empty */
1690         for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1691                 if (cfg->vlan_filter[i] != 0)
1692                         break;
1693         }
1694
1695         hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1696
1697         return err;
1698 }
1699
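/* Enable or disable every VLAN filter entry currently configured */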
1700 static int
1701 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1702 {
1703         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1704         struct aq_hw_cfg_s *cfg =
1705                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1706         int i;
1707
1708         PMD_INIT_FUNC_TRACE();
1709
1710         for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1711                 if (cfg->vlan_filter[i])
1712                         hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1713         }
1714         return 0;
1715 }
1716
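/*
 * Apply VLAN filtering and stripping offloads.
 * Extended (QinQ) VLAN mode is not supported.
 */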
1717 static int
1718 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1719 {
1720         struct aq_hw_cfg_s *cfg =
1721                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1722         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1723         int ret = 0;
1724         int i;
1725
1726         PMD_INIT_FUNC_TRACE();
1727
1728         ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1729
1730         cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1731
1732         for (i = 0; i < dev->data->nb_rx_queues; i++)
1733                 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1734
1735         if (mask & ETH_VLAN_EXTEND_MASK)
1736                 ret = -ENOTSUP;
1737
1738         return ret;
1739 }
1740
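/* Set the inner or outer VLAN Ethertype (TPID) used by the hardware */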
1741 static int
1742 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1743                   uint16_t tpid)
1744 {
1745         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1746         int err = 0;
1747
1748         PMD_INIT_FUNC_TRACE();
1749
1750         switch (vlan_type) {
1751         case ETH_VLAN_TYPE_INNER:
1752                 hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1753                 break;
1754         case ETH_VLAN_TYPE_OUTER:
1755                 hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1756                 break;
1757         default:
1758                 PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1759                 err = -ENOTSUP;
1760         }
1761
1762         return err;
1763 }
1764
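/* Enable or disable VLAN stripping on a single Rx queue */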
1765 static void
1766 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1767 {
1768         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1769
1770         PMD_INIT_FUNC_TRACE();
1771
1772         if (queue_id >= dev->data->nb_rx_queues) {
1773                 PMD_DRV_LOG(ERR, "Invalid queue id");
1774                 return;
1775         }
1776
1777         hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1778 }
1779
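/*
 * Replace the multicast address list. Filter entries beyond the new list
 * are disabled.
 */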
1780 static int
1781 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1782                           struct rte_ether_addr *mc_addr_set,
1783                           uint32_t nb_mc_addr)
1784 {
1785         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1786         u32 i;
1787
1788         if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1789                 return -EINVAL;
1790
1791         /* Rewrite the multicast region of the unicast filter table */
1792         for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1793                 u8 *mac_addr = NULL;
1794                 u32 l = 0, h = 0;
1795
1796                 if (i < nb_mc_addr) {
1797                         mac_addr = mc_addr_set[i].addr_bytes;
1798                         l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1799                                 (mac_addr[4] << 8) | mac_addr[5];
1800                         h = (mac_addr[0] << 8) | mac_addr[1];
1801                 }
1802
1803                 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1804                 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1805                                                         HW_ATL_B0_MAC_MIN + i);
1806                 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1807                                                         HW_ATL_B0_MAC_MIN + i);
1808                 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1809                                            HW_ATL_B0_MAC_MIN + i);
1810         }
1811
1812         return 0;
1813 }
1814
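/*
 * Update the RSS redirection table, clamping each entry to the number of
 * configured Rx queues.
 */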
1815 static int
1816 atl_reta_update(struct rte_eth_dev *dev,
1817                    struct rte_eth_rss_reta_entry64 *reta_conf,
1818                    uint16_t reta_size)
1819 {
1820         int i;
1821         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1822         struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1823
1824         for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1825                 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1826                                         dev->data->nb_rx_queues - 1);
1827
1828         hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1829         return 0;
1830 }
1831
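/* Return the cached RSS redirection table */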
1832 static int
1833 atl_reta_query(struct rte_eth_dev *dev,
1834                     struct rte_eth_rss_reta_entry64 *reta_conf,
1835                     uint16_t reta_size)
1836 {
1837         int i;
1838         struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1839
1840         for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1841                 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1842         reta_conf->mask = ~0U;
1843         return 0;
1844 }
1845
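/*
 * Update the RSS enable state and hash key. A built-in default key is
 * programmed when the caller does not supply one.
 */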
1846 static int
1847 atl_rss_hash_update(struct rte_eth_dev *dev,
1848                                  struct rte_eth_rss_conf *rss_conf)
1849 {
1850         struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1851         struct aq_hw_cfg_s *cfg =
1852                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1853         static u8 def_rss_key[40] = {
1854                 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1855                 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1856                 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1857                 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1858                 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1859         };
1860
1861         cfg->is_rss = !!rss_conf->rss_hf;
1862         if (rss_conf->rss_key) {
1863                 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1864                        rss_conf->rss_key_len);
1865                 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1866         } else {
1867                 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1868                        sizeof(def_rss_key));
1869                 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1870         }
1871
1872         hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1873         hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1874         return 0;
1875 }
1876
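/* Report the configured RSS hash functions and, when a buffer is provided, the hash key */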
1877 static int
1878 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1879                                  struct rte_eth_rss_conf *rss_conf)
1880 {
1881         struct aq_hw_cfg_s *cfg =
1882                 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1883
1884         rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1885         if (rss_conf->rss_key) {
1886                 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1887                 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1888                        rss_conf->rss_key_len);
1889         }
1890
1891         return 0;
1892 }
1893
1894 static bool
1895 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1896 {
1897         if (strcmp(dev->device->driver->name, drv->driver.name))
1898                 return false;
1899
1900         return true;
1901 }
1902
1903 bool
1904 is_atlantic_supported(struct rte_eth_dev *dev)
1905 {
1906         return is_device_supported(dev, &rte_atl_pmd);
1907 }
1908
1909 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1910 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1911 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1912 RTE_LOG_REGISTER(atl_logtype_init, pmd.net.atlantic.init, NOTICE);
1913 RTE_LOG_REGISTER(atl_logtype_driver, pmd.net.atlantic.driver, NOTICE);