1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
20 static int atl_dev_configure(struct rte_eth_dev *dev);
21 static int atl_dev_start(struct rte_eth_dev *dev);
22 static void atl_dev_stop(struct rte_eth_dev *dev);
23 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
24 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
25 static void atl_dev_close(struct rte_eth_dev *dev);
26 static int atl_dev_reset(struct rte_eth_dev *dev);
27 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
28 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
29 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
30 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
31 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
33 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
34 struct rte_eth_xstat_name *xstats_names,
37 static int atl_dev_stats_get(struct rte_eth_dev *dev,
38 struct rte_eth_stats *stats);
40 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
41 struct rte_eth_xstat *stats, unsigned int n);
43 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
45 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
48 static void atl_dev_info_get(struct rte_eth_dev *dev,
49 struct rte_eth_dev_info *dev_info);
51 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
53 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
56 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
57 uint16_t vlan_id, int on);
59 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
61 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
62 uint16_t queue_id, int on);
64 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
65 enum rte_vlan_type vlan_type, uint16_t tpid);
68 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
69 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
70 struct rte_dev_eeprom_info *eeprom);
71 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
72 struct rte_dev_eeprom_info *eeprom);
75 static int atl_dev_get_regs(struct rte_eth_dev *dev,
76 struct rte_dev_reg_info *regs);
79 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
80 struct rte_eth_fc_conf *fc_conf);
81 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
82 struct rte_eth_fc_conf *fc_conf);
84 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
87 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
88 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
89 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
90 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
91 struct rte_intr_handle *handle);
92 static void atl_dev_interrupt_handler(void *param);
95 static int atl_add_mac_addr(struct rte_eth_dev *dev,
96 struct ether_addr *mac_addr,
97 uint32_t index, uint32_t pool);
98 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
99 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
100 struct ether_addr *mac_addr);
102 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
103 struct ether_addr *mc_addr_set,
104 uint32_t nb_mc_addr);
107 static int atl_reta_update(struct rte_eth_dev *dev,
108 struct rte_eth_rss_reta_entry64 *reta_conf,
110 static int atl_reta_query(struct rte_eth_dev *dev,
111 struct rte_eth_rss_reta_entry64 *reta_conf,
113 static int atl_rss_hash_update(struct rte_eth_dev *dev,
114 struct rte_eth_rss_conf *rss_conf);
115 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
116 struct rte_eth_rss_conf *rss_conf);
119 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
120 struct rte_pci_device *pci_dev);
121 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
123 static void atl_dev_info_get(struct rte_eth_dev *dev,
124 struct rte_eth_dev_info *dev_info);
/* EAL log-type handles for this PMD: one for init-time messages, one for
 * runtime driver messages (presumably registered elsewhere — TODO confirm).
 */
126 int atl_logtype_init;
127 int atl_logtype_driver;
130 * The set of PCI devices this driver supports
/* Matched by vendor/device ID; the zero-vendor entry terminates the table. */
132 static const struct rte_pci_id pci_id_atl_map[] = {
133 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
134 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
135 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
136 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
137 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
/* AQC1xx family */
139 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
140 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
141 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
142 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
143 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
144 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
/* "S"-suffixed device IDs */
146 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
147 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
148 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
149 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
150 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
151 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
/* "E"-suffixed device IDs */
153 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
154 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
155 { .vendor_id = 0, /* sentinel */ },
/* PCI driver descriptor: requires BAR mapping, supports link-status-change
 * interrupts and IOVA-as-VA mode; probe/remove route to the generic
 * rte_eth_dev PCI helpers below.
 */
158 static struct rte_pci_driver rte_atl_pmd = {
159 .id_table = pci_id_atl_map,
160 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
161 RTE_PCI_DRV_IOVA_AS_VA,
162 .probe = eth_atl_pci_probe,
163 .remove = eth_atl_pci_remove,
/* Rx offload capability mask advertised via dev_info (see atl_dev_info_get). */
166 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
167 | DEV_RX_OFFLOAD_IPV4_CKSUM \
168 | DEV_RX_OFFLOAD_UDP_CKSUM \
169 | DEV_RX_OFFLOAD_TCP_CKSUM \
170 | DEV_RX_OFFLOAD_JUMBO_FRAME \
171 | DEV_RX_OFFLOAD_MACSEC_STRIP \
172 | DEV_RX_OFFLOAD_VLAN_FILTER)

/* Tx offload capability mask advertised via dev_info. */
174 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
175 | DEV_TX_OFFLOAD_IPV4_CKSUM \
176 | DEV_TX_OFFLOAD_UDP_CKSUM \
177 | DEV_TX_OFFLOAD_TCP_CKSUM \
178 | DEV_TX_OFFLOAD_TCP_TSO \
179 | DEV_TX_OFFLOAD_MACSEC_INSERT \
180 | DEV_TX_OFFLOAD_MULTI_SEGS)
/* Rx descriptor ring constraints reported to applications in dev_info. */
182 static const struct rte_eth_desc_lim rx_desc_lim = {
183 .nb_max = ATL_MAX_RING_DESC,
184 .nb_min = ATL_MIN_RING_DESC,
185 .nb_align = ATL_RXD_ALIGN,

/* Tx descriptor ring constraints, including per-packet segment limits. */
188 static const struct rte_eth_desc_lim tx_desc_lim = {
189 .nb_max = ATL_MAX_RING_DESC,
190 .nb_min = ATL_MIN_RING_DESC,
191 .nb_align = ATL_TXD_ALIGN,
192 .nb_seg_max = ATL_TX_MAX_SEG,
193 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* xstats plumbing: each table entry records a name, a byte offset into either
 * struct aq_stats_s (MSM counters) or struct macsec_stats (MACsec counters),
 * and a type tag selecting which source structure to read from.
 */
196 enum atl_xstats_type {
201 #define ATL_XSTATS_FIELD(name) { \
203 offsetof(struct aq_stats_s, name), \
207 #define ATL_MACSEC_XSTATS_FIELD(name) { \
209 offsetof(struct macsec_stats, name), \
213 struct atl_xstats_tbl_s {
216 enum atl_xstats_type type;
/* Master xstats table: MSM hardware counters first, then MACsec ingress and
 * egress counters grouped by the section comments below. Order defines the
 * index space seen by xstats_get/xstats_get_names.
 */
219 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
220 ATL_XSTATS_FIELD(uprc),
221 ATL_XSTATS_FIELD(mprc),
222 ATL_XSTATS_FIELD(bprc),
223 ATL_XSTATS_FIELD(erpt),
224 ATL_XSTATS_FIELD(uptc),
225 ATL_XSTATS_FIELD(mptc),
226 ATL_XSTATS_FIELD(bptc),
227 ATL_XSTATS_FIELD(erpr),
228 ATL_XSTATS_FIELD(ubrc),
229 ATL_XSTATS_FIELD(ubtc),
230 ATL_XSTATS_FIELD(mbrc),
231 ATL_XSTATS_FIELD(mbtc),
232 ATL_XSTATS_FIELD(bbrc),
233 ATL_XSTATS_FIELD(bbtc),
234 /* Ingress Common Counters */
235 ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
236 ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
237 ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
238 ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
239 ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
240 ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
241 ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
242 ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
243 /* Ingress SA Counters */
244 ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
245 ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
246 ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
247 ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
248 ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
249 ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
250 ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
251 ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
252 ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
253 /* Egress Common Counters */
254 ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
255 ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
256 ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
257 ATL_MACSEC_XSTATS_FIELD(out_too_long),
258 /* Egress SC Counters */
259 ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
260 ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
261 /* Egress SA Counters */
262 ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
263 ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
264 ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
265 ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
/* ethdev callback table wired into eth_dev->dev_ops by eth_atl_dev_init.
 * Note that xstats_reset deliberately reuses atl_dev_stats_reset.
 */
268 static const struct eth_dev_ops atl_eth_dev_ops = {
269 .dev_configure = atl_dev_configure,
270 .dev_start = atl_dev_start,
271 .dev_stop = atl_dev_stop,
272 .dev_set_link_up = atl_dev_set_link_up,
273 .dev_set_link_down = atl_dev_set_link_down,
274 .dev_close = atl_dev_close,
275 .dev_reset = atl_dev_reset,
278 .promiscuous_enable = atl_dev_promiscuous_enable,
279 .promiscuous_disable = atl_dev_promiscuous_disable,
280 .allmulticast_enable = atl_dev_allmulticast_enable,
281 .allmulticast_disable = atl_dev_allmulticast_disable,
284 .link_update = atl_dev_link_update,
286 .get_reg = atl_dev_get_regs,
289 .stats_get = atl_dev_stats_get,
290 .xstats_get = atl_dev_xstats_get,
291 .xstats_get_names = atl_dev_xstats_get_names,
292 .stats_reset = atl_dev_stats_reset,
293 .xstats_reset = atl_dev_stats_reset,
295 .fw_version_get = atl_fw_version_get,
296 .dev_infos_get = atl_dev_info_get,
297 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
299 .mtu_set = atl_dev_mtu_set,
302 .vlan_filter_set = atl_vlan_filter_set,
303 .vlan_offload_set = atl_vlan_offload_set,
304 .vlan_tpid_set = atl_vlan_tpid_set,
305 .vlan_strip_queue_set = atl_vlan_strip_queue_set,
308 .rx_queue_start = atl_rx_queue_start,
309 .rx_queue_stop = atl_rx_queue_stop,
310 .rx_queue_setup = atl_rx_queue_setup,
311 .rx_queue_release = atl_rx_queue_release,
313 .tx_queue_start = atl_tx_queue_start,
314 .tx_queue_stop = atl_tx_queue_stop,
315 .tx_queue_setup = atl_tx_queue_setup,
316 .tx_queue_release = atl_tx_queue_release,
318 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
319 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
321 .rx_queue_count = atl_rx_queue_count,
322 .rx_descriptor_status = atl_dev_rx_descriptor_status,
323 .tx_descriptor_status = atl_dev_tx_descriptor_status,
326 .get_eeprom_length = atl_dev_get_eeprom_length,
327 .get_eeprom = atl_dev_get_eeprom,
328 .set_eeprom = atl_dev_set_eeprom,
331 .flow_ctrl_get = atl_flow_ctrl_get,
332 .flow_ctrl_set = atl_flow_ctrl_set,
335 .mac_addr_add = atl_add_mac_addr,
336 .mac_addr_remove = atl_remove_mac_addr,
337 .mac_addr_set = atl_set_default_mac_addr,
338 .set_mc_addr_list = atl_dev_set_mc_addr_list,
339 .rxq_info_get = atl_rxq_info_get,
340 .txq_info_get = atl_txq_info_get,
342 .reta_update = atl_reta_update,
343 .reta_query = atl_reta_query,
344 .rss_hash_update = atl_rss_hash_update,
345 .rss_hash_conf_get = atl_rss_hash_conf_get,
/* Full HW reset, delegated to the B0 hardware layer. */
348 static inline int32_t
349 atl_reset_hw(struct aq_hw_s *hw)
351 return hw_atl_b0_hw_reset(hw);

/* Unmask all bits of the ITR interrupt mask (low word set register). */
355 atl_enable_intr(struct rte_eth_dev *dev)
357 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
359 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);

/* Mask all bits of the ITR interrupt mask (low word clear register). */
363 atl_disable_intr(struct aq_hw_s *hw)
365 PMD_INIT_FUNC_TRACE();
366 hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/* Per-port init: installs ops and burst functions, hardcodes the adapter
 * configuration, loads the FW ops table, reads the permanent MAC address,
 * and registers/enables the interrupt handler. Secondary processes stop
 * after the burst-function assignment (primary has done the rest).
 */
370 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
372 struct atl_adapter *adapter =
373 (struct atl_adapter *)eth_dev->data->dev_private;
374 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
375 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
376 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
379 PMD_INIT_FUNC_TRACE();
381 eth_dev->dev_ops = &atl_eth_dev_ops;
382 eth_dev->rx_pkt_burst = &atl_recv_pkts;
383 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
384 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
386 /* For secondary processes, the primary process has done all the work */
387 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
390 /* Vendor and Device ID need to be set before init of shared code */
391 hw->device_id = pci_dev->id.device_id;
392 hw->vendor_id = pci_dev->id.vendor_id;
393 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
395 /* Hardware configuration - hardcode */
396 adapter->hw_cfg.is_lro = false;
397 adapter->hw_cfg.wol = false;
398 adapter->hw_cfg.is_rss = false;
399 adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
/* Advertise all link rates by default (mask continues on elided lines). */
401 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
407 adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
408 adapter->hw_cfg.aq_rss.indirection_table_size =
409 HW_ATL_B0_RSS_REDIRECTION_MAX;
411 hw->aq_nic_cfg = &adapter->hw_cfg;
413 /* disable interrupt */
414 atl_disable_intr(hw);
416 /* Allocate memory for storing MAC addresses */
417 eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
418 if (eth_dev->data->mac_addrs == NULL) {
419 PMD_INIT_LOG(ERR, "MAC Malloc failed");
/* Resolve the firmware interface (fills hw->aq_fw_ops). */
423 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
427 /* Copy the permanent MAC address */
428 if (hw->aq_fw_ops->get_mac_permanent(hw,
429 eth_dev->data->mac_addrs->addr_bytes) != 0)
432 /* Reset the hw statistics */
433 atl_dev_stats_reset(eth_dev);
435 rte_intr_callback_register(intr_handle,
436 atl_dev_interrupt_handler, eth_dev);
438 /* enable uio/vfio intr/eventfd mapping */
439 rte_intr_enable(intr_handle);
441 /* enable support intr */
442 atl_enable_intr(eth_dev);
/* Per-port teardown: closes the device if still running, clears the ops and
 * burst pointers, unhooks the interrupt callback and frees the MAC storage.
 * No-op (early return) for secondary processes.
 */
448 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
450 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
451 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
454 PMD_INIT_FUNC_TRACE();
456 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
459 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
461 if (hw->adapter_stopped == 0)
462 atl_dev_close(eth_dev);
464 eth_dev->dev_ops = NULL;
465 eth_dev->rx_pkt_burst = NULL;
466 eth_dev->tx_pkt_burst = NULL;
468 /* disable uio intr before callback unregister */
469 rte_intr_disable(intr_handle);
470 rte_intr_callback_unregister(intr_handle,
471 atl_dev_interrupt_handler, eth_dev);
473 rte_free(eth_dev->data->mac_addrs);
474 eth_dev->data->mac_addrs = NULL;
/* Generic PCI probe: allocates an ethdev with atl_adapter private data and
 * runs eth_atl_dev_init on it.
 */
480 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
481 struct rte_pci_device *pci_dev)
483 return rte_eth_dev_pci_generic_probe(pci_dev,
484 sizeof(struct atl_adapter), eth_atl_dev_init);

/* Generic PCI remove: tears the ethdev down via eth_atl_dev_uninit. */
488 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
490 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
/* dev_configure callback: only flags that a link-status update is needed
 * after the next start; actual HW setup happens in atl_dev_start.
 */
494 atl_dev_configure(struct rte_eth_dev *dev)
496 struct atl_interrupt *intr =
497 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
499 PMD_INIT_FUNC_TRACE();
501 /* set flag to update link status after init */
502 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
508 * Configure device link speed and setup link.
509 * It returns 0 on success.
/* dev_start: rejects fixed-speed configs, resets and re-inits the HW, sets up
 * the Rx-queue interrupt vector mapping, initializes Rx, starts the queues,
 * brings the link up and re-enables interrupts. Error paths (elided lines)
 * fall through to atl_stop_queues at the end.
 */
512 atl_dev_start(struct rte_eth_dev *dev)
514 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
515 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
516 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
517 uint32_t intr_vector = 0;
521 PMD_INIT_FUNC_TRACE();
523 /* set adapter started */
524 hw->adapter_stopped = 0;
526 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
528 "Invalid link_speeds for port %u, fix speed not supported",
533 /* disable uio/vfio intr/eventfd mapping */
534 rte_intr_disable(intr_handle);
536 /* reinitialize adapter
537 * this calls reset and start
539 status = atl_reset_hw(hw);
543 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
545 hw_atl_b0_hw_start(hw);
546 /* check and configure queue intr-vector mapping */
547 if ((rte_intr_cap_multiple(intr_handle) ||
548 !RTE_ETH_DEV_SRIOV(dev).active) &&
549 dev->data->dev_conf.intr_conf.rxq != 0) {
550 intr_vector = dev->data->nb_rx_queues;
551 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
552 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
553 ATL_MAX_INTR_QUEUE_NUM);
556 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
557 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
/* Allocate the per-rx-queue interrupt vector table if datapath intr is on. */
562 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
563 intr_handle->intr_vec = rte_zmalloc("intr_vec",
564 dev->data->nb_rx_queues * sizeof(int), 0);
565 if (intr_handle->intr_vec == NULL) {
566 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
567 " intr_vec", dev->data->nb_rx_queues);
572 /* initialize transmission unit */
575 /* This can fail when allocating mbufs for descriptor rings */
576 err = atl_rx_init(dev);
578 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
582 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
583 hw->fw_ver_actual >> 24,
584 (hw->fw_ver_actual >> 16) & 0xFF,
585 hw->fw_ver_actual & 0xFFFF);
586 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
588 err = atl_start_queues(dev);
590 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
594 err = atl_dev_set_link_up(dev);
596 err = hw->aq_fw_ops->update_link_status(hw);
/* Link is "up" iff firmware reports a non-zero rate in Mbps. */
601 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
606 if (rte_intr_allow_others(intr_handle)) {
607 /* check if lsc interrupt is enabled */
608 if (dev->data->dev_conf.intr_conf.lsc != 0)
609 atl_dev_lsc_interrupt_setup(dev, true);
611 atl_dev_lsc_interrupt_setup(dev, false);
613 rte_intr_callback_unregister(intr_handle,
614 atl_dev_interrupt_handler, dev);
615 if (dev->data->dev_conf.intr_conf.lsc != 0)
616 PMD_INIT_LOG(INFO, "lsc won't enable because of"
617 " no intr multiplex");
620 /* check if rxq interrupt is enabled */
621 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
622 rte_intr_dp_is_en(intr_handle))
623 atl_dev_rxq_interrupt_setup(dev);
625 /* enable uio/vfio intr/eventfd mapping */
626 rte_intr_enable(intr_handle);
628 /* resume enabled intr since hw reset */
629 atl_enable_intr(dev);
/* Error path (label on elided line): roll the queues back. */
634 atl_stop_queues(dev);
639 * Stop device: disable rx and tx functions to allow for reconfiguring.
642 atl_dev_stop(struct rte_eth_dev *dev)
644 struct rte_eth_link link;
646 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
647 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
648 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
650 PMD_INIT_FUNC_TRACE();
652 /* disable interrupts */
653 atl_disable_intr(hw);
/* Mark stopped so eth_atl_dev_uninit won't call atl_dev_close again. */
657 hw->adapter_stopped = 1;
659 atl_stop_queues(dev);
661 /* Clear stored conf */
662 dev->data->scattered_rx = 0;
665 /* Clear recorded link status */
666 memset(&link, 0, sizeof(link));
667 rte_eth_linkstatus_set(dev, &link);
669 if (!rte_intr_allow_others(intr_handle))
670 /* resume to the default handler */
671 rte_intr_callback_register(intr_handle,
672 atl_dev_interrupt_handler,
675 /* Clean datapath event and queue/vec mapping */
676 rte_intr_efd_disable(intr_handle);
677 if (intr_handle->intr_vec != NULL) {
678 rte_free(intr_handle->intr_vec);
679 intr_handle->intr_vec = NULL;
684 * Set device link up: enable tx.
/* Translates the ethdev link_speeds bitmap into the firmware rate mask;
 * AUTONEG selects every rate from the hardcoded config mask.
 */
687 atl_dev_set_link_up(struct rte_eth_dev *dev)
689 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
690 uint32_t link_speeds = dev->data->dev_conf.link_speeds;
691 uint32_t speed_mask = 0;
693 if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
694 speed_mask = hw->aq_nic_cfg->link_speed_msk;
696 if (link_speeds & ETH_LINK_SPEED_10G)
697 speed_mask |= AQ_NIC_RATE_10G;
698 if (link_speeds & ETH_LINK_SPEED_5G)
699 speed_mask |= AQ_NIC_RATE_5G;
700 if (link_speeds & ETH_LINK_SPEED_1G)
701 speed_mask |= AQ_NIC_RATE_1G;
702 if (link_speeds & ETH_LINK_SPEED_2_5G)
703 speed_mask |= AQ_NIC_RATE_2G5;
704 if (link_speeds & ETH_LINK_SPEED_100M)
705 speed_mask |= AQ_NIC_RATE_100M;
708 return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
712 * Set device link down: disable tx.
/* An empty rate mask tells the firmware to drop the link. */
715 atl_dev_set_link_down(struct rte_eth_dev *dev)
717 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
719 return hw->aq_fw_ops->set_link_speed(hw, 0);
723 * Reset and stop device.
/* dev_close: frees the queues (stop logic on elided lines). */
726 atl_dev_close(struct rte_eth_dev *dev)
728 PMD_INIT_FUNC_TRACE();
732 atl_free_queues(dev);
/* dev_reset: full uninit followed by re-init of the same port. */
736 atl_dev_reset(struct rte_eth_dev *dev)
740 ret = eth_atl_dev_uninit(dev);
744 ret = eth_atl_dev_init(dev);
/* Pushes the cached MACsec configuration (cfg->aq_macsec, filled by the
 * atl_macsec_* helpers below) to the firmware as a sequence of requests:
 * global cfg, TX SC, RX SC, TX SA, RX SA. Bails out early when MACsec is
 * disabled or the firmware lacks send_macsec_req.
 */
750 atl_dev_configure_macsec(struct rte_eth_dev *dev)
752 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
753 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
754 struct aq_macsec_config *aqcfg = &cf->aq_macsec;
755 struct macsec_msg_fw_request msg_macsec;
756 struct macsec_msg_fw_response response;
758 if (!aqcfg->common.macsec_enabled ||
759 hw->aq_fw_ops->send_macsec_req == NULL)
762 memset(&msg_macsec, 0, sizeof(msg_macsec));
764 /* Creating set of sc/sa structures from parameters provided by DPDK */
766 /* Configure macsec */
767 msg_macsec.msg_type = macsec_cfg_msg;
768 msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
769 msg_macsec.cfg.interrupts_enabled = 1;
771 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
776 memset(&msg_macsec, 0, sizeof(msg_macsec));
778 /* Configure TX SC */
780 msg_macsec.msg_type = macsec_add_tx_sc_msg;
781 msg_macsec.txsc.index = 0; /* TXSC always one (??) */
782 msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
784 /* MAC addr for TX */
785 msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
786 msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
787 msg_macsec.txsc.sa_mask = 0x3f;
789 msg_macsec.txsc.da_mask = 0;
790 msg_macsec.txsc.tci = 0x0B; /* NOTE(review): magic TCI value — presumably 802.1AE TCI bits; confirm against FW spec */
791 msg_macsec.txsc.curr_an = 0; /* SA index which currently used */
794 * Creating SCI (Secure Channel Identifier).
795 * SCI constructed from Source MAC and Port identifier
797 uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
798 (msg_macsec.txsc.mac_sa[0] >> 16);
799 uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
801 uint32_t port_identifier = 1;
803 msg_macsec.txsc.sci[1] = sci_hi_part;
804 msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
806 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
811 memset(&msg_macsec, 0, sizeof(msg_macsec));
813 /* Configure RX SC */
815 msg_macsec.msg_type = macsec_add_rx_sc_msg;
816 msg_macsec.rxsc.index = aqcfg->rxsc.pi;
817 msg_macsec.rxsc.replay_protect =
818 aqcfg->common.replay_protection_enabled;
819 msg_macsec.rxsc.anti_replay_window = 0;
821 /* MAC addr for RX */
822 msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
823 msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
824 msg_macsec.rxsc.da_mask = 0; /* 0 disables DA matching; 0x3f (full-MAC match) kept for reference — TODO confirm semantics */
826 msg_macsec.rxsc.sa_mask = 0;
828 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
833 memset(&msg_macsec, 0, sizeof(msg_macsec));
835 /* Configure TX SA (msg_type below is macsec_add_tx_sa_msg) */
837 msg_macsec.msg_type = macsec_add_tx_sa_msg;
838 msg_macsec.txsa.index = aqcfg->txsa.idx;
839 msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
/* Key words are byte-swapped and word-reversed for the firmware. */
841 msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
842 msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
843 msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
844 msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
846 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
851 memset(&msg_macsec, 0, sizeof(msg_macsec));
853 /* Configure RX SA */
855 msg_macsec.msg_type = macsec_add_rx_sa_msg;
856 msg_macsec.rxsa.index = aqcfg->rxsa.idx;
857 msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
859 msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
860 msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
861 msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
862 msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
864 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
/* Cache-only MACsec setters: these record parameters in the per-port config
 * (applied later by atl_dev_configure_macsec, scheduled from link_update).
 */
872 int atl_macsec_enable(struct rte_eth_dev *dev,
873 uint8_t encr, uint8_t repl_prot)
875 struct aq_hw_cfg_s *cfg =
876 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
878 cfg->aq_macsec.common.macsec_enabled = 1;
879 cfg->aq_macsec.common.encryption_enabled = encr;
880 cfg->aq_macsec.common.replay_protection_enabled = repl_prot;

/* Clear the cached MACsec-enabled flag. */
885 int atl_macsec_disable(struct rte_eth_dev *dev)
887 struct aq_hw_cfg_s *cfg =
888 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
890 cfg->aq_macsec.common.macsec_enabled = 0;

/* Store the TX SC MAC address, right-aligned in the 8-byte field (+2 offset). */
895 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
897 struct aq_hw_cfg_s *cfg =
898 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
900 memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
901 memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac, ETHER_ADDR_LEN);

/* Store the RX SC MAC address and port identifier. */
906 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
907 uint8_t *mac, uint16_t pi)
909 struct aq_hw_cfg_s *cfg =
910 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
912 memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
913 memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac, ETHER_ADDR_LEN);
914 cfg->aq_macsec.rxsc.pi = pi;

/* Store TX SA index, association number, packet number and 128-bit key. */
919 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
920 uint8_t idx, uint8_t an,
921 uint32_t pn, uint8_t *key)
923 struct aq_hw_cfg_s *cfg =
924 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
926 cfg->aq_macsec.txsa.idx = idx;
927 cfg->aq_macsec.txsa.pn = pn;
928 cfg->aq_macsec.txsa.an = an;
930 memcpy(&cfg->aq_macsec.txsa.key, key, 16);

/* Store RX SA index, association number, packet number and 128-bit key. */
934 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
935 uint8_t idx, uint8_t an,
936 uint32_t pn, uint8_t *key)
938 struct aq_hw_cfg_s *cfg =
939 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
941 cfg->aq_macsec.rxsa.idx = idx;
942 cfg->aq_macsec.rxsa.pn = pn;
943 cfg->aq_macsec.rxsa.an = an;
945 memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
/* stats_get: refreshes the firmware counters, then fills rte_eth_stats from
 * hw->curr_stats (DMA counters) plus software per-queue totals.
 */
950 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
952 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
953 struct aq_hw_s *hw = &adapter->hw;
954 struct atl_sw_stats *swstats = &adapter->sw_stats;
957 hw->aq_fw_ops->update_stats(hw);
959 /* Fill out the rte_eth_stats statistics structure */
960 stats->ipackets = hw->curr_stats.dma_pkt_rc;
961 stats->ibytes = hw->curr_stats.dma_oct_rc;
962 stats->imissed = hw->curr_stats.dpc;
963 stats->ierrors = hw->curr_stats.erpt;
965 stats->opackets = hw->curr_stats.dma_pkt_tc;
966 stats->obytes = hw->curr_stats.dma_oct_tc;
969 stats->rx_nombuf = swstats->rx_nombuf;
/* Per-queue counters are tracked in software by the rx/tx burst paths. */
971 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
972 stats->q_ipackets[i] = swstats->q_ipackets[i];
973 stats->q_opackets[i] = swstats->q_opackets[i];
974 stats->q_ibytes[i] = swstats->q_ibytes[i];
975 stats->q_obytes[i] = swstats->q_obytes[i];
976 stats->q_errors[i] = swstats->q_errors[i];
/* stats_reset (also used as xstats_reset): syncs counters from firmware one
 * last time, then zeroes both the hardware snapshot and the software totals.
 */
982 atl_dev_stats_reset(struct rte_eth_dev *dev)
984 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
985 struct aq_hw_s *hw = &adapter->hw;
987 hw->aq_fw_ops->update_stats(hw);
989 /* Reset software totals */
990 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
992 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/* xstats_get_names: copies names out of atl_xstats_tbl; when the caller's
 * buffer is absent/too small, the table size is returned instead (elided
 * early-return at line 1003).
 */
996 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
997 struct rte_eth_xstat_name *xstats_names,
1003 return RTE_DIM(atl_xstats_tbl);
1005 for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
1006 strlcpy(xstats_names[i].name, atl_xstats_tbl[i].name,
1007 RTE_ETH_XSTATS_NAME_SIZE);
/* xstats_get: fetches MACsec counters from firmware (when supported) and
 * resolves each table entry against the matching counter structure via its
 * recorded byte offset.
 */
1013 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1016 struct atl_adapter *adapter =
1017 (struct atl_adapter *)dev->data->dev_private;
1018 struct aq_hw_s *hw = &adapter->hw;
1019 struct get_stats req = { 0 };
1020 struct macsec_msg_fw_request msg = { 0 };
1021 struct macsec_msg_fw_response resp = { 0 };
/* 0xff index presumably means "aggregate over all SA/SC" — TODO confirm. */
1028 if (hw->aq_fw_ops->send_macsec_req != NULL) {
1029 req.ingress_sa_index = 0xff;
1030 req.egress_sc_index = 0xff;
1031 req.egress_sa_index = 0xff;
1033 msg.msg_type = macsec_get_stats_msg;
1036 err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1039 for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
1042 switch (atl_xstats_tbl[i].type) {
1043 case XSTATS_TYPE_MSM:
1044 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
1045 atl_xstats_tbl[i].offset);
1047 case XSTATS_TYPE_MACSEC:
1050 stats[i].value = *(u64 *)((uint8_t *)&resp.stats +
1051 atl_xstats_tbl[i].offset);
/* fw_version_get: formats the packed firmware version (major.minor.build from
 * bits 31-24 / 23-16 / 15-0) into the caller's buffer; the returned length
 * includes the terminating NUL, per the ethdev contract.
 */
1060 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1062 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1063 uint32_t fw_ver = 0;
1064 unsigned int ret = 0;
1066 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1070 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1071 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1073 ret += 1; /* add string null-terminator */
/* dev_infos_get: advertises queue counts, MTU/MAC limits, offload masks,
 * descriptor limits, RSS parameters and the supported speed capabilities.
 */
1082 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1084 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1086 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1087 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1089 dev_info->min_rx_bufsize = 1024;
1090 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1091 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1092 dev_info->max_vfs = pci_dev->max_vfs;
1094 dev_info->max_hash_mac_addrs = 0;
1095 dev_info->max_vmdq_pools = 0;
1096 dev_info->vmdq_queue_num = 0;
1098 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1100 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1103 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1104 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1107 dev_info->default_txconf = (struct rte_eth_txconf) {
1108 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1111 dev_info->rx_desc_lim = rx_desc_lim;
1112 dev_info->tx_desc_lim = tx_desc_lim;
1114 dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1115 dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1116 dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1118 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1119 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1120 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1121 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* Returns the static ptype list only when the device actually uses this
 * PMD's Rx burst function (otherwise the elided path returns NULL).
 */
1124 static const uint32_t *
1125 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1127 static const uint32_t ptypes[] = {
1129 RTE_PTYPE_L2_ETHER_ARP,
1130 RTE_PTYPE_L2_ETHER_VLAN,
1140 if (dev->rx_pkt_burst == atl_recv_pkts)

/* Deferred alarm callback: applies the cached MACsec config out of
 * interrupt context (scheduled from atl_dev_link_update).
 */
1147 atl_dev_delayed_handler(void *param)
1149 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1151 atl_dev_configure_macsec(dev);
1155 /* return 0 means link status changed, -1 means not changed */
1157 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
1159 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1160 struct rte_eth_link link, old;
1163 link.link_status = ETH_LINK_DOWN;
1164 link.link_speed = 0;
1165 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1166 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1167 memset(&old, 0, sizeof(old));
1169 /* load old link status */
1170 rte_eth_linkstatus_get(dev, &old);
1172 /* read current link status */
1173 err = hw->aq_fw_ops->update_link_status(hw);
/* mbps == 0 means firmware reports the link as down. */
1178 if (hw->aq_link_status.mbps == 0) {
1179 /* write default (down) link status */
1180 rte_eth_linkstatus_set(dev, &link);
1181 if (link.link_status == old.link_status)
1186 link.link_status = ETH_LINK_UP;
1187 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1188 link.link_speed = hw->aq_link_status.mbps;
1190 rte_eth_linkstatus_set(dev, &link);
1192 if (link.link_status == old.link_status)
/* On a status change, re-apply MACsec config 1s later via an EAL alarm. */
1195 if (rte_eal_alarm_set(1000 * 1000,
1196 atl_dev_delayed_handler, (void *)dev) < 0)
1197 PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");
1203 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1205 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1207 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1211 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1213 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1215 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1219 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1221 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1223 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
1227 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1229 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1231 if (dev->data->promiscuous == 1)
1232 return; /* must remain in all_multicast mode */
1234 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
/*
 * It clears the interrupt causes and enables the interrupt.
 * It will be called once only during nic initialized.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 * @param on
 *   Enable or Disable (currently unused — status is always printed).
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
	atl_dev_link_status_print(dev);
/* Rx-queue interrupt setup hook; no per-queue work is done here. */
atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/*
 * Read the interrupt cause register, mask further interrupts, and record
 * a pending link update in intr->flags for atl_dev_interrupt_action().
 */
atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	hw_atl_b0_hw_irq_read(hw, &cause);

	/* Mask interrupts until the action handler re-enables them */
	atl_disable_intr(hw);

	/* Latch a link-change request for deferred processing */
	if (cause & BIT(ATL_IRQ_CAUSE_LINK))
		intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
/*
 * It gets and then prints the link status.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
atl_dev_link_status_print(struct rte_eth_dev *dev)
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status) {
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
					(int)(dev->data->port_id),
					(unsigned int)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX ?
					"full-duplex" : "half-duplex");
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
					(int)(dev->data->port_id));

	/* Debug-only dump of the PCI address this port is bound to */
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.devid,
				pci_dev->addr.function);

	PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
/*
 * It executes link_update after knowing an interrupt occurred.
 * Also polls firmware for expired MACsec keys and raises
 * RTE_ETH_EVENT_MACSEC when any threshold/key has expired.
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 *
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
atl_dev_interrupt_action(struct rte_eth_dev *dev,
			struct rte_intr_handle *intr_handle)
	struct atl_interrupt *intr =
		ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct atl_adapter *adapter =
		(struct atl_adapter *)dev->data->dev_private;
	struct aq_hw_s *hw = &adapter->hw;

	/* Only act on the link-change flag latched by interrupt_get_status */
	if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))

	intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;

	/* Notify userapp if link status changed */
	if (!atl_dev_link_update(dev, 0)) {
		atl_dev_link_status_print(dev);
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_INTR_LSC, NULL);

	/* MACsec key-expiry polling requires firmware support */
	if (hw->aq_fw_ops->send_macsec_req == NULL)

	/* Check macsec Keys expired */
	struct get_stats req = { 0 };
	struct macsec_msg_fw_request msg = { 0 };
	struct macsec_msg_fw_response resp = { 0 };

	req.ingress_sa_index = 0x0;
	req.egress_sc_index = 0x0;
	req.egress_sa_index = 0x0;
	msg.msg_type = macsec_get_stats_msg;

	int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
		PMD_DRV_LOG(ERR, "send_macsec_req fail");

	if (resp.stats.egress_threshold_expired ||
	    resp.stats.ingress_threshold_expired ||
	    resp.stats.egress_expired ||
	    resp.stats.ingress_expired) {
		PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
		_rte_eth_dev_callback_process(dev,
			RTE_ETH_EVENT_MACSEC, NULL);

	/* Re-enable interrupts masked in atl_dev_interrupt_get_status() */
	atl_enable_intr(dev);
	rte_intr_enable(intr_handle);
/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param param
 *   The address of parameter (struct rte_eth_dev *) registered before.
 */
atl_dev_interrupt_handler(void *param)
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	/* Read/clear causes, then process (link update, MACsec check) */
	atl_dev_interrupt_get_status(dev);
	atl_dev_interrupt_action(dev, dev->intr_handle);
1414 #define SFP_EEPROM_SIZE 0xff
/* Report the size of the SFP module EEPROM exposed via the eeprom API. */
atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
	return SFP_EEPROM_SIZE;
1422 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1423 struct rte_dev_eeprom_info *eeprom)
1425 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1426 uint32_t dev_addr = SMBUS_DEVICE_ID;
1428 if (hw->aq_fw_ops->get_eeprom == NULL)
1431 if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1432 eeprom->data == NULL)
1436 dev_addr = eeprom->magic;
1438 return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1439 eeprom->length, eeprom->offset);
1442 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1443 struct rte_dev_eeprom_info *eeprom)
1445 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1446 uint32_t dev_addr = SMBUS_DEVICE_ID;
1448 if (hw->aq_fw_ops->set_eeprom == NULL)
1451 if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1452 eeprom->data == NULL)
1456 dev_addr = eeprom->magic;
1458 return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1459 eeprom->length, eeprom->offset);
/*
 * Register-dump callback. With regs->data == NULL it only reports the
 * required length/width; otherwise only a full-length dump is accepted.
 */
atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Query mode: tell the caller how big the dump buffer must be */
	if (regs->data == NULL) {
		regs->length = hw_atl_utils_hw_get_reg_length();
		regs->width = sizeof(u32);

	/* Only full register dump is supported */
	if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())

	err = hw_atl_utils_hw_get_regs(hw, regs->data);

	/* Device version */
	mif_id = hw_atl_reg_glb_mif_id_get(hw);
	regs->version = mif_id & 0xFFU;
1489 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1491 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1493 if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1494 fc_conf->mode = RTE_FC_NONE;
1495 else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1496 fc_conf->mode = RTE_FC_FULL;
1497 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1498 fc_conf->mode = RTE_FC_RX_PAUSE;
1499 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1500 fc_conf->mode = RTE_FC_TX_PAUSE;
1506 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1508 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1509 uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1512 if (hw->aq_fw_ops->set_flow_control == NULL)
1515 if (fc_conf->mode == RTE_FC_NONE)
1516 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1517 else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1518 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1519 else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1520 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1521 else if (fc_conf->mode == RTE_FC_FULL)
1522 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1524 if (old_flow_control != hw->aq_nic_cfg->flow_control)
1525 return hw->aq_fw_ops->set_flow_control(hw);
/*
 * Program unicast L2 filter slot `index` with `mac_addr` and enable or
 * disable it. A NULL mac_addr programs zeros (used when removing).
 *
 * The 48-bit address is split into a 16-bit MSW (bytes 0-1) and a
 * 32-bit LSW (bytes 2-5) as the hardware registers expect.
 */
atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
		    u8 *mac_addr, bool enable)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	unsigned int h = 0U;
	unsigned int l = 0U;

	h = (mac_addr[0] << 8) | (mac_addr[1]);
	l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		(mac_addr[4] << 8) | mac_addr[5];

	/* Disable the slot before rewriting it, then re-enable if requested */
	hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
	hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
	hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);

	hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);

	/* Collapse any accumulated hardware error flags into a return code */
	err = aq_hw_err_from_flags(hw);
/*
 * ethdev mac_addr_add callback: rejects the all-zero address, then
 * programs and enables unicast filter slot `index`.
 */
atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		 uint32_t index __rte_unused, uint32_t pool __rte_unused)
	if (is_zero_ether_addr(mac_addr)) {
		PMD_DRV_LOG(ERR, "Invalid Ethernet Address");

	return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
/* ethdev mac_addr_remove callback: zero and disable filter slot `index`. */
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
	atl_update_mac_addr(dev, index, NULL, false);
/*
 * Replace the default (slot 0) MAC address.
 * NOTE(review): return values of remove/add are not propagated here —
 * a failed add leaves slot 0 empty; confirm whether callers rely on this.
 */
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
/*
 * ethdev mtu_set callback: validate the requested MTU against the
 * device's max Rx packet length and update the configured frame size.
 */
atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
	struct rte_eth_dev_info dev_info;
	/* On-wire frame = MTU + Ethernet header + CRC */
	uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

	atl_dev_info_get(dev, &dev_info);

	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
/*
 * Add or remove `vlan_id` from the hardware VLAN filter table.
 *
 * Walks the shadow table in cfg->vlan_filter to find/del/add the entry,
 * then enables hardware VLAN-promiscuous mode iff the table is empty.
 */
atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Look for an existing entry for this VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == vlan_id) {
			/* Disable VLAN filter. */
			hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);

			/* Clear VLAN filter entry */
			cfg->vlan_filter[i] = 0;

	/* VLAN_ID was not found. So, nothing to delete. */
	if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)

	/* VLAN_ID already exists, or was already removed above. Nothing to do. */
	if (i != HW_ATL_B0_MAX_VLAN_IDS)

	/* Try to find a free VLAN filter slot to add the new VLAN_ID */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] == 0)

	if (i == HW_ATL_B0_MAX_VLAN_IDS) {
		/* We have no free VLAN filter to add new VLAN_ID*/

	/* Program the free slot: shadow entry, action, id, enable */
	cfg->vlan_filter[i] = vlan_id;
	hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
	hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
	hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);

	/* Enable VLAN promisc mode if vlan_filter empty  */
	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i] != 0)

	hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
/*
 * Enable/disable every hardware VLAN filter slot that has a non-zero
 * entry in the shadow table (used by atl_vlan_offload_set).
 */
atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
		if (cfg->vlan_filter[i])
			hw_atl_rpf_vlan_flr_en_set(hw, en, i);
/*
 * ethdev vlan_offload_set callback: toggles VLAN filtering and
 * per-queue VLAN stripping. Extended (QinQ) VLAN is not supported.
 */
atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);

	cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);

	/* Apply the stripping setting uniformly across all Rx queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);

	/* Extended (QinQ) VLAN offload is not supported by this hardware */
	if (mask & ETH_VLAN_EXTEND_MASK)
/*
 * ethdev vlan_tpid_set callback: program the inner or outer VLAN
 * ethertype used by the hardware VLAN parser.
 */
atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
		  uint16_t tpid)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	switch (vlan_type) {
	case ETH_VLAN_TYPE_INNER:
		hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
	case ETH_VLAN_TYPE_OUTER:
		hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
		PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1727 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1729 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1731 PMD_INIT_FUNC_TRACE();
1733 if (queue_id > dev->data->nb_rx_queues) {
1734 PMD_DRV_LOG(ERR, "Invalid queue id");
1738 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
/*
 * ethdev set_mc_addr_list callback: rewrite the multicast portion of the
 * unicast filter table (slots HW_ATL_B0_MAC_MIN and above). Slots beyond
 * nb_mc_addr are programmed with zeros and disabled.
 */
atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
			  struct ether_addr *mc_addr_set,
			  uint32_t nb_mc_addr)
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Reject lists larger than the number of available filter slots */
	if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)

	/* Update whole uc filters table */
	for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
		u8 *mac_addr = NULL;

		if (i < nb_mc_addr) {
			mac_addr = mc_addr_set[i].addr_bytes;
			/* bytes 2-5 -> 32-bit LSW, bytes 0-1 -> 16-bit MSW */
			l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
				(mac_addr[4] << 8) | mac_addr[5];
			h = (mac_addr[0] << 8) | mac_addr[1];

		/* Disable, rewrite, then enable only slots with an address */
		hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
							HW_ATL_B0_MAC_MIN + i);
		hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
					   HW_ATL_B0_MAC_MIN + i);
1777 atl_reta_update(struct rte_eth_dev *dev,
1778 struct rte_eth_rss_reta_entry64 *reta_conf,
1782 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1783 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1785 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1786 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1787 dev->data->nb_rx_queues - 1);
1789 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1794 atl_reta_query(struct rte_eth_dev *dev,
1795 struct rte_eth_rss_reta_entry64 *reta_conf,
1799 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1801 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1802 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1803 reta_conf->mask = ~0U;
1808 atl_rss_hash_update(struct rte_eth_dev *dev,
1809 struct rte_eth_rss_conf *rss_conf)
1811 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1812 struct aq_hw_cfg_s *cfg =
1813 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1814 static u8 def_rss_key[40] = {
1815 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1816 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1817 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1818 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1819 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1822 cfg->is_rss = !!rss_conf->rss_hf;
1823 if (rss_conf->rss_key) {
1824 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1825 rss_conf->rss_key_len);
1826 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1828 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1829 sizeof(def_rss_key));
1830 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1833 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1834 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
/*
 * ethdev rss_hash_conf_get callback: report the configured hash
 * functions and, if the caller provided a buffer, copy out the key.
 * The caller's rss_key buffer is assumed large enough for
 * hash_secret_key_size bytes, per the ethdev API contract.
 */
atl_rss_hash_conf_get(struct rte_eth_dev *dev,
		      struct rte_eth_rss_conf *rss_conf)
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);

	rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
	if (rss_conf->rss_key) {
		rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
		memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
		       rss_conf->rss_key_len);
/* True when `dev` is bound to driver `drv` (driver names match exactly). */
is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
	if (strcmp(dev->device->driver->name, drv->driver.name))
/* True when `dev` is driven by this (atlantic) PMD. */
is_atlantic_supported(struct rte_eth_dev *dev)
	return is_device_supported(dev, &rte_atl_pmd);
/* Register the PMD, its PCI id table, and kernel-module dependencies */
RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
/* Constructor: register the driver's log types, defaulting to NOTICE */
RTE_INIT(atl_init_log)
	atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
	if (atl_logtype_init >= 0)
		rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
	atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
	if (atl_logtype_driver >= 0)
		rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);