1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_string_fns.h>
6 #include <rte_ethdev_pci.h>
9 #include "atl_ethdev.h"
10 #include "atl_common.h"
11 #include "atl_hw_regs.h"
13 #include "hw_atl/hw_atl_llh.h"
14 #include "hw_atl/hw_atl_b0.h"
15 #include "hw_atl/hw_atl_b0_internal.h"
/* Forward declarations of the eth_dev_ops callbacks implemented below.
 * NOTE(review): this excerpt is sampled — some prototype lines are elided.
 */
/* Device lifecycle */
17 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
18 static int atl_dev_configure(struct rte_eth_dev *dev);
19 static int atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static int atl_dev_close(struct rte_eth_dev *dev);
24 static int atl_dev_reset(struct rte_eth_dev *dev);
/* Rx filtering modes */
25 static int atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static int atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static int atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static int atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
/* Statistics */
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 struct rte_eth_xstat_name *xstats_names,
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 struct rte_eth_stats *stats);
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 struct rte_eth_xstat *stats, unsigned int n);
41 static int atl_dev_stats_reset(struct rte_eth_dev *dev);
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
48 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
/* VLAN offloads */
51 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
52 uint16_t vlan_id, int on);
54 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
56 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
57 uint16_t queue_id, int on);
59 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
60 enum rte_vlan_type vlan_type, uint16_t tpid);
/* EEPROM access */
63 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
64 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
65 struct rte_dev_eeprom_info *eeprom);
66 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
67 struct rte_dev_eeprom_info *eeprom);
/* Register dump */
70 static int atl_dev_get_regs(struct rte_eth_dev *dev,
71 struct rte_dev_reg_info *regs);
/* Flow control */
74 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
75 struct rte_eth_fc_conf *fc_conf);
76 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
77 struct rte_eth_fc_conf *fc_conf);
79 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
/* Interrupt handling */
82 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
83 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
84 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
85 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
86 struct rte_intr_handle *handle);
87 static void atl_dev_interrupt_handler(void *param);
/* MAC address management */
90 static int atl_add_mac_addr(struct rte_eth_dev *dev,
91 struct rte_ether_addr *mac_addr,
92 uint32_t index, uint32_t pool);
93 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
94 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
95 struct rte_ether_addr *mac_addr);
97 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
98 struct rte_ether_addr *mc_addr_set,
/* RSS */
102 static int atl_reta_update(struct rte_eth_dev *dev,
103 struct rte_eth_rss_reta_entry64 *reta_conf,
105 static int atl_reta_query(struct rte_eth_dev *dev,
106 struct rte_eth_rss_reta_entry64 *reta_conf,
108 static int atl_rss_hash_update(struct rte_eth_dev *dev,
109 struct rte_eth_rss_conf *rss_conf);
110 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
111 struct rte_eth_rss_conf *rss_conf);
/* PCI bus glue */
114 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
115 struct rte_pci_device *pci_dev);
116 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
118 static int atl_dev_info_get(struct rte_eth_dev *dev,
119 struct rte_eth_dev_info *dev_info);
122 * The set of PCI devices this driver supports
/* PCI ID table: Aquantia dev-board IDs (D1xx), AQC1xx, AQC1xxS and
 * AQC1xxE variants, terminated by a zero-vendor sentinel entry.
 * NOTE(review): closing brace of the initializer is elided in this excerpt.
 */
124 static const struct rte_pci_id pci_id_atl_map[] = {
125 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
126 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
127 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
128 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
129 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
131 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
132 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
133 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
134 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
135 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
136 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
138 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
139 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
140 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
141 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
142 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
143 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
145 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
146 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
147 { .vendor_id = 0, /* sentinel */ },
/* PCI driver descriptor: requires BAR mapping and supports link-status
 * change interrupts. NOTE(review): closing brace elided in this excerpt.
 */
150 static struct rte_pci_driver rte_atl_pmd = {
151 .id_table = pci_id_atl_map,
152 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
153 .probe = eth_atl_pci_probe,
154 .remove = eth_atl_pci_remove,
/* Rx offload capabilities advertised by atl_dev_info_get(). */
157 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
158 | DEV_RX_OFFLOAD_IPV4_CKSUM \
159 | DEV_RX_OFFLOAD_UDP_CKSUM \
160 | DEV_RX_OFFLOAD_TCP_CKSUM \
161 | DEV_RX_OFFLOAD_JUMBO_FRAME \
162 | DEV_RX_OFFLOAD_MACSEC_STRIP \
163 | DEV_RX_OFFLOAD_VLAN_FILTER)
/* Tx offload capabilities advertised by atl_dev_info_get(). */
165 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
166 | DEV_TX_OFFLOAD_IPV4_CKSUM \
167 | DEV_TX_OFFLOAD_UDP_CKSUM \
168 | DEV_TX_OFFLOAD_TCP_CKSUM \
169 | DEV_TX_OFFLOAD_TCP_TSO \
170 | DEV_TX_OFFLOAD_MACSEC_INSERT \
171 | DEV_TX_OFFLOAD_MULTI_SEGS)
/* SFP module EEPROM size in bytes (256). */
173 #define SFP_EEPROM_SIZE 0x100
/* Rx descriptor ring limits reported to applications.
 * NOTE(review): closing braces elided in this excerpt.
 */
175 static const struct rte_eth_desc_lim rx_desc_lim = {
176 .nb_max = ATL_MAX_RING_DESC,
177 .nb_min = ATL_MIN_RING_DESC,
178 .nb_align = ATL_RXD_ALIGN,
/* Tx descriptor ring limits, including per-packet segment maximums. */
181 static const struct rte_eth_desc_lim tx_desc_lim = {
182 .nb_max = ATL_MAX_RING_DESC,
183 .nb_min = ATL_MIN_RING_DESC,
184 .nb_align = ATL_TXD_ALIGN,
185 .nb_seg_max = ATL_TX_MAX_SEG,
186 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* Discriminates where an xstat value is read from: the MSM hardware
 * counters (struct aq_stats_s) or the MACSec counters (struct macsec_stats).
 * NOTE(review): enumerator lines and parts of the macros are elided in
 * this excerpt.
 */
189 enum atl_xstats_type {
/* Table-entry helper: name + offset into struct aq_stats_s. */
194 #define ATL_XSTATS_FIELD(name) { \
196 offsetof(struct aq_stats_s, name), \
/* Table-entry helper: name + offset into struct macsec_stats. */
200 #define ATL_MACSEC_XSTATS_FIELD(name) { \
202 offsetof(struct macsec_stats, name), \
/* One row of the xstats dispatch table. */
206 struct atl_xstats_tbl_s {
209 enum atl_xstats_type type;
/* Extended statistics table: MSM packet/byte counters first, then the
 * MACSec ingress/egress counters (only exposed when the firmware reports
 * MACSec capability — see atl_dev_xstats_get_count()).
 * NOTE(review): closing brace elided in this excerpt.
 */
212 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
213 ATL_XSTATS_FIELD(uprc),
214 ATL_XSTATS_FIELD(mprc),
215 ATL_XSTATS_FIELD(bprc),
216 ATL_XSTATS_FIELD(erpt),
217 ATL_XSTATS_FIELD(uptc),
218 ATL_XSTATS_FIELD(mptc),
219 ATL_XSTATS_FIELD(bptc),
220 ATL_XSTATS_FIELD(erpr),
221 ATL_XSTATS_FIELD(ubrc),
222 ATL_XSTATS_FIELD(ubtc),
223 ATL_XSTATS_FIELD(mbrc),
224 ATL_XSTATS_FIELD(mbtc),
225 ATL_XSTATS_FIELD(bbrc),
226 ATL_XSTATS_FIELD(bbtc),
227 /* Ingress Common Counters */
228 ATL_MACSEC_XSTATS_FIELD(in_ctl_pkts),
229 ATL_MACSEC_XSTATS_FIELD(in_tagged_miss_pkts),
230 ATL_MACSEC_XSTATS_FIELD(in_untagged_miss_pkts),
231 ATL_MACSEC_XSTATS_FIELD(in_notag_pkts),
232 ATL_MACSEC_XSTATS_FIELD(in_untagged_pkts),
233 ATL_MACSEC_XSTATS_FIELD(in_bad_tag_pkts),
234 ATL_MACSEC_XSTATS_FIELD(in_no_sci_pkts),
235 ATL_MACSEC_XSTATS_FIELD(in_unknown_sci_pkts),
236 /* Ingress SA Counters */
237 ATL_MACSEC_XSTATS_FIELD(in_untagged_hit_pkts),
238 ATL_MACSEC_XSTATS_FIELD(in_not_using_sa),
239 ATL_MACSEC_XSTATS_FIELD(in_unused_sa),
240 ATL_MACSEC_XSTATS_FIELD(in_not_valid_pkts),
241 ATL_MACSEC_XSTATS_FIELD(in_invalid_pkts),
242 ATL_MACSEC_XSTATS_FIELD(in_ok_pkts),
243 ATL_MACSEC_XSTATS_FIELD(in_unchecked_pkts),
244 ATL_MACSEC_XSTATS_FIELD(in_validated_octets),
245 ATL_MACSEC_XSTATS_FIELD(in_decrypted_octets),
246 /* Egress Common Counters */
247 ATL_MACSEC_XSTATS_FIELD(out_ctl_pkts),
248 ATL_MACSEC_XSTATS_FIELD(out_unknown_sa_pkts),
249 ATL_MACSEC_XSTATS_FIELD(out_untagged_pkts),
250 ATL_MACSEC_XSTATS_FIELD(out_too_long),
251 /* Egress SC Counters */
252 ATL_MACSEC_XSTATS_FIELD(out_sc_protected_pkts),
253 ATL_MACSEC_XSTATS_FIELD(out_sc_encrypted_pkts),
254 /* Egress SA Counters */
255 ATL_MACSEC_XSTATS_FIELD(out_sa_hit_drop_redirect),
256 ATL_MACSEC_XSTATS_FIELD(out_sa_protected2_pkts),
257 ATL_MACSEC_XSTATS_FIELD(out_sa_protected_pkts),
258 ATL_MACSEC_XSTATS_FIELD(out_sa_encrypted_pkts),
/* ethdev callback table wired into eth_dev->dev_ops by eth_atl_dev_init().
 * NOTE(review): closing brace elided in this excerpt.
 */
261 static const struct eth_dev_ops atl_eth_dev_ops = {
262 .dev_configure = atl_dev_configure,
263 .dev_start = atl_dev_start,
264 .dev_stop = atl_dev_stop,
265 .dev_set_link_up = atl_dev_set_link_up,
266 .dev_set_link_down = atl_dev_set_link_down,
267 .dev_close = atl_dev_close,
268 .dev_reset = atl_dev_reset,
/* Rx filtering modes */
271 .promiscuous_enable = atl_dev_promiscuous_enable,
272 .promiscuous_disable = atl_dev_promiscuous_disable,
273 .allmulticast_enable = atl_dev_allmulticast_enable,
274 .allmulticast_disable = atl_dev_allmulticast_disable,
/* Link */
277 .link_update = atl_dev_link_update,
279 .get_reg = atl_dev_get_regs,
/* Statistics */
282 .stats_get = atl_dev_stats_get,
283 .xstats_get = atl_dev_xstats_get,
284 .xstats_get_names = atl_dev_xstats_get_names,
285 .stats_reset = atl_dev_stats_reset,
286 .xstats_reset = atl_dev_stats_reset,
288 .fw_version_get = atl_fw_version_get,
289 .dev_infos_get = atl_dev_info_get,
290 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
292 .mtu_set = atl_dev_mtu_set,
/* VLAN */
295 .vlan_filter_set = atl_vlan_filter_set,
296 .vlan_offload_set = atl_vlan_offload_set,
297 .vlan_tpid_set = atl_vlan_tpid_set,
298 .vlan_strip_queue_set = atl_vlan_strip_queue_set,
/* Queue control (implemented in atl_rxtx.c) */
301 .rx_queue_start = atl_rx_queue_start,
302 .rx_queue_stop = atl_rx_queue_stop,
303 .rx_queue_setup = atl_rx_queue_setup,
304 .rx_queue_release = atl_rx_queue_release,
306 .tx_queue_start = atl_tx_queue_start,
307 .tx_queue_stop = atl_tx_queue_stop,
308 .tx_queue_setup = atl_tx_queue_setup,
309 .tx_queue_release = atl_tx_queue_release,
311 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
312 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
/* EEPROM */
315 .get_eeprom_length = atl_dev_get_eeprom_length,
316 .get_eeprom = atl_dev_get_eeprom,
317 .set_eeprom = atl_dev_set_eeprom,
/* Flow control */
320 .flow_ctrl_get = atl_flow_ctrl_get,
321 .flow_ctrl_set = atl_flow_ctrl_set,
/* MAC addresses */
324 .mac_addr_add = atl_add_mac_addr,
325 .mac_addr_remove = atl_remove_mac_addr,
326 .mac_addr_set = atl_set_default_mac_addr,
327 .set_mc_addr_list = atl_dev_set_mc_addr_list,
328 .rxq_info_get = atl_rxq_info_get,
329 .txq_info_get = atl_txq_info_get,
/* RSS */
331 .reta_update = atl_reta_update,
332 .reta_query = atl_reta_query,
333 .rss_hash_update = atl_rss_hash_update,
334 .rss_hash_conf_get = atl_rss_hash_conf_get,
/* Thin wrappers over the B0 HAL: full HW reset, and global interrupt
 * mask set/clear (0xffffffff = all interrupt lines).
 * NOTE(review): braces and return types of the two intr helpers are
 * elided in this excerpt.
 */
337 static inline int32_t
338 atl_reset_hw(struct aq_hw_s *hw)
340 return hw_atl_b0_hw_reset(hw);
/* Unmask all interrupts for the port. */
344 atl_enable_intr(struct rte_eth_dev *dev)
346 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
348 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask all interrupts for the adapter. */
352 atl_disable_intr(struct aq_hw_s *hw)
354 PMD_INIT_FUNC_TRACE();
355 hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/* Per-port init callback invoked from eth_atl_pci_probe(): installs the
 * ops/burst functions, sets the default hardware configuration, loads the
 * firmware ops, reads the permanent MAC address and arms the interrupt
 * handler. Secondary processes only attach the function pointers.
 * NOTE(review): several lines (error-path returns, closing braces) are
 * elided in this excerpt.
 */
359 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
361 struct atl_adapter *adapter = eth_dev->data->dev_private;
362 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
363 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
364 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
367 PMD_INIT_FUNC_TRACE();
369 eth_dev->dev_ops = &atl_eth_dev_ops;
371 eth_dev->rx_queue_count = atl_rx_queue_count;
372 eth_dev->rx_descriptor_status = atl_dev_rx_descriptor_status;
373 eth_dev->tx_descriptor_status = atl_dev_tx_descriptor_status;
375 eth_dev->rx_pkt_burst = &atl_recv_pkts;
376 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
377 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
379 /* For secondary processes, the primary process has done all the work */
380 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
383 /* Vendor and Device ID need to be set before init of shared code */
384 hw->device_id = pci_dev->id.device_id;
385 hw->vendor_id = pci_dev->id.vendor_id;
386 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
388 /* Hardware configuration - hardcode */
389 adapter->hw_cfg.is_lro = false;
390 adapter->hw_cfg.wol = false;
391 adapter->hw_cfg.is_rss = false;
392 adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
/* Allow all link rates by default; 10G plus the lower rates
 * (remaining rate flags elided in this excerpt).
 */
394 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
400 adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
401 adapter->hw_cfg.aq_rss.indirection_table_size =
402 HW_ATL_B0_RSS_REDIRECTION_MAX;
404 hw->aq_nic_cfg = &adapter->hw_cfg;
/* Serializes firmware mailbox access; destroyed in atl_dev_close(). */
406 pthread_mutex_init(&hw->mbox_mutex, NULL);
408 /* disable interrupt */
409 atl_disable_intr(hw);
411 /* Allocate memory for storing MAC addresses */
412 eth_dev->data->mac_addrs = rte_zmalloc("atlantic",
413 RTE_ETHER_ADDR_LEN, 0);
414 if (eth_dev->data->mac_addrs == NULL) {
415 PMD_INIT_LOG(ERR, "MAC Malloc failed");
/* Detect firmware flavor and bind the matching aq_fw_ops table. */
419 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
423 /* Copy the permanent MAC address */
424 if (hw->aq_fw_ops->get_mac_permanent(hw,
425 eth_dev->data->mac_addrs->addr_bytes) != 0)
428 /* Reset the hw statistics */
429 atl_dev_stats_reset(eth_dev);
431 rte_intr_callback_register(intr_handle,
432 atl_dev_interrupt_handler, eth_dev);
434 /* enable uio/vfio intr/eventfd mapping */
435 rte_intr_enable(intr_handle);
437 /* enable support intr */
438 atl_enable_intr(eth_dev);
/* PCI probe: allocate an ethdev with an atl_adapter private area and run
 * eth_atl_dev_init() on it.
 */
444 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
445 struct rte_pci_device *pci_dev)
447 return rte_eth_dev_pci_generic_probe(pci_dev,
448 sizeof(struct atl_adapter), eth_atl_dev_init);
/* PCI remove: tear the port down via atl_dev_close(). */
452 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
454 return rte_eth_dev_pci_generic_remove(pci_dev, atl_dev_close);
/* dev_configure callback: only records that a link-status refresh is
 * needed once the port starts; all real setup happens in atl_dev_start().
 */
458 atl_dev_configure(struct rte_eth_dev *dev)
460 struct atl_interrupt *intr =
461 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
463 PMD_INIT_FUNC_TRACE();
465 /* set flag to update link status after init */
466 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
472 * Configure device link speed and setup link.
473 * It returns 0 on success.
/* dev_start: reset + re-init the HW, wire up queue interrupt vectors,
 * init Rx/Tx rings, start queues, bring the link up and re-enable
 * interrupts. Error paths fall through to atl_stop_queues() at the end.
 * NOTE(review): several error-handling lines and closing braces are
 * elided in this excerpt.
 */
476 atl_dev_start(struct rte_eth_dev *dev)
478 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
479 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
480 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
481 uint32_t intr_vector = 0;
485 PMD_INIT_FUNC_TRACE();
487 /* set adapter started */
488 hw->adapter_stopped = 0;
/* Fixed (non-autoneg) speeds are not supported by this PMD. */
490 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
492 "Invalid link_speeds for port %u, fix speed not supported",
497 /* disable uio/vfio intr/eventfd mapping */
498 rte_intr_disable(intr_handle);
500 /* reinitialize adapter
501 * this calls reset and start
503 status = atl_reset_hw(hw);
507 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
509 hw_atl_b0_hw_start(hw);
510 /* check and configure queue intr-vector mapping */
511 if ((rte_intr_cap_multiple(intr_handle) ||
512 !RTE_ETH_DEV_SRIOV(dev).active) &&
513 dev->data->dev_conf.intr_conf.rxq != 0) {
514 intr_vector = dev->data->nb_rx_queues;
515 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
516 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
517 ATL_MAX_INTR_QUEUE_NUM);
520 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
521 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
/* Allocate the per-queue interrupt vector map on first start. */
526 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
527 intr_handle->intr_vec = rte_zmalloc("intr_vec",
528 dev->data->nb_rx_queues * sizeof(int), 0);
529 if (intr_handle->intr_vec == NULL) {
530 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
531 " intr_vec", dev->data->nb_rx_queues);
536 /* initialize transmission unit */
539 /* This can fail when allocating mbufs for descriptor rings */
540 err = atl_rx_init(dev);
542 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
546 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
547 hw->fw_ver_actual >> 24,
548 (hw->fw_ver_actual >> 16) & 0xFF,
549 hw->fw_ver_actual & 0xFFFF);
550 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
552 err = atl_start_queues(dev);
554 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
558 err = atl_dev_set_link_up(dev);
560 err = hw->aq_fw_ops->update_link_status(hw);
/* Non-zero mbps reported by FW means the link is up. */
565 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
567 if (rte_intr_allow_others(intr_handle)) {
568 /* check if lsc interrupt is enabled */
569 if (dev->data->dev_conf.intr_conf.lsc != 0)
570 atl_dev_lsc_interrupt_setup(dev, true);
572 atl_dev_lsc_interrupt_setup(dev, false);
574 rte_intr_callback_unregister(intr_handle,
575 atl_dev_interrupt_handler, dev);
576 if (dev->data->dev_conf.intr_conf.lsc != 0)
577 PMD_INIT_LOG(INFO, "lsc won't enable because of"
578 " no intr multiplex");
581 /* check if rxq interrupt is enabled */
582 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
583 rte_intr_dp_is_en(intr_handle))
584 atl_dev_rxq_interrupt_setup(dev);
586 /* enable uio/vfio intr/eventfd mapping */
587 rte_intr_enable(intr_handle);
589 /* resume enabled intr since hw reset */
590 atl_enable_intr(dev);
/* Error path: stop any queues that were started before failing. */
595 atl_stop_queues(dev);
600 * Stop device: disable rx and tx functions to allow for reconfiguring.
/* dev_stop: mask interrupts, stop queues, clear cached state/link and
 * release the Rx-queue interrupt vector mapping.
 * NOTE(review): a few lines (hw declaration, braces) are elided in this
 * excerpt.
 */
603 atl_dev_stop(struct rte_eth_dev *dev)
605 struct rte_eth_link link;
607 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
608 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
609 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
611 PMD_INIT_FUNC_TRACE();
612 dev->data->dev_started = 0;
614 /* disable interrupts */
615 atl_disable_intr(hw);
619 hw->adapter_stopped = 1;
621 atl_stop_queues(dev);
623 /* Clear stored conf */
624 dev->data->scattered_rx = 0;
627 /* Clear recorded link status */
628 memset(&link, 0, sizeof(link));
629 rte_eth_linkstatus_set(dev, &link);
631 if (!rte_intr_allow_others(intr_handle))
632 /* resume to the default handler */
633 rte_intr_callback_register(intr_handle,
634 atl_dev_interrupt_handler,
637 /* Clean datapath event and queue/vec mapping */
638 rte_intr_efd_disable(intr_handle);
639 if (intr_handle->intr_vec != NULL) {
640 rte_free(intr_handle->intr_vec);
641 intr_handle->intr_vec = NULL;
646 * Set device link up: enable tx.
/* Translate the configured ETH_LINK_SPEED_* mask into AQ_NIC_RATE_*
 * bits and ask the firmware to bring the link up at those rates.
 * AUTONEG means "advertise everything the adapter config allows".
 */
649 atl_dev_set_link_up(struct rte_eth_dev *dev)
651 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
652 uint32_t link_speeds = dev->data->dev_conf.link_speeds;
653 uint32_t speed_mask = 0;
655 if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
656 speed_mask = hw->aq_nic_cfg->link_speed_msk;
658 if (link_speeds & ETH_LINK_SPEED_10G)
659 speed_mask |= AQ_NIC_RATE_10G;
660 if (link_speeds & ETH_LINK_SPEED_5G)
661 speed_mask |= AQ_NIC_RATE_5G;
662 if (link_speeds & ETH_LINK_SPEED_1G)
663 speed_mask |= AQ_NIC_RATE_1G;
664 if (link_speeds & ETH_LINK_SPEED_2_5G)
665 speed_mask |= AQ_NIC_RATE_2G5;
666 if (link_speeds & ETH_LINK_SPEED_100M)
667 speed_mask |= AQ_NIC_RATE_100M;
670 return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
674 * Set device link down: disable tx.
/* A zero speed mask tells the firmware to drop the link. */
677 atl_dev_set_link_down(struct rte_eth_dev *dev)
679 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
681 return hw->aq_fw_ops->set_link_speed(hw, 0);
685 * Reset and stop device.
/* dev_close: stop the port, free queues, detach the interrupt callback
 * and destroy the firmware mailbox mutex created in eth_atl_dev_init().
 * Only the primary process performs the teardown.
 * NOTE(review): some lines (hw declaration use, returns, braces) are
 * elided in this excerpt.
 */
688 atl_dev_close(struct rte_eth_dev *dev)
690 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
691 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
694 PMD_INIT_FUNC_TRACE();
696 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
699 hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
703 atl_free_queues(dev);
705 /* disable uio intr before callback unregister */
706 rte_intr_disable(intr_handle);
707 rte_intr_callback_unregister(intr_handle,
708 atl_dev_interrupt_handler, dev);
710 pthread_mutex_destroy(&hw->mbox_mutex);
/* dev_reset: full close followed by re-init of the same port. */
716 atl_dev_reset(struct rte_eth_dev *dev)
720 ret = atl_dev_close(dev);
724 ret = eth_atl_dev_init(dev);
/* Push the MACSec configuration accumulated in aq_macsec_config down to
 * the firmware as a sequence of mailbox requests: global config, Tx SC,
 * Rx SC, Tx SA and Rx SA. No-op when MACSec is disabled or the firmware
 * lacks send_macsec_req.
 * NOTE(review): response-checking lines appear elided in this excerpt.
 */
730 atl_dev_configure_macsec(struct rte_eth_dev *dev)
732 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
733 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
734 struct aq_macsec_config *aqcfg = &cf->aq_macsec;
735 struct macsec_msg_fw_request msg_macsec;
736 struct macsec_msg_fw_response response;
738 if (!aqcfg->common.macsec_enabled ||
739 hw->aq_fw_ops->send_macsec_req == NULL)
742 memset(&msg_macsec, 0, sizeof(msg_macsec));
744 /* Creating set of sc/sa structures from parameters provided by DPDK */
746 /* Configure macsec */
747 msg_macsec.msg_type = macsec_cfg_msg;
748 msg_macsec.cfg.enabled = aqcfg->common.macsec_enabled;
749 msg_macsec.cfg.interrupts_enabled = 1;
751 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
756 memset(&msg_macsec, 0, sizeof(msg_macsec));
758 /* Configure TX SC */
760 msg_macsec.msg_type = macsec_add_tx_sc_msg;
761 msg_macsec.txsc.index = 0; /* TXSC always one (??) */
762 msg_macsec.txsc.protect = aqcfg->common.encryption_enabled;
764 /* MAC addr for TX, byte-swapped into the firmware's word order */
765 msg_macsec.txsc.mac_sa[0] = rte_bswap32(aqcfg->txsc.mac[1]);
766 msg_macsec.txsc.mac_sa[1] = rte_bswap32(aqcfg->txsc.mac[0]);
767 msg_macsec.txsc.sa_mask = 0x3f;
769 msg_macsec.txsc.da_mask = 0;
770 msg_macsec.txsc.tci = 0x0B;
771 msg_macsec.txsc.curr_an = 0; /* SA index which currently used */
774 * Creating SCI (Secure Channel Identifier).
775 * SCI constructed from Source MAC and Port identifier
777 uint32_t sci_hi_part = (msg_macsec.txsc.mac_sa[1] << 16) |
778 (msg_macsec.txsc.mac_sa[0] >> 16);
779 uint32_t sci_low_part = (msg_macsec.txsc.mac_sa[0] << 16);
781 uint32_t port_identifier = 1;
783 msg_macsec.txsc.sci[1] = sci_hi_part;
784 msg_macsec.txsc.sci[0] = sci_low_part | port_identifier;
786 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
791 memset(&msg_macsec, 0, sizeof(msg_macsec));
793 /* Configure RX SC */
795 msg_macsec.msg_type = macsec_add_rx_sc_msg;
796 msg_macsec.rxsc.index = aqcfg->rxsc.pi;
797 msg_macsec.rxsc.replay_protect =
798 aqcfg->common.replay_protection_enabled;
799 msg_macsec.rxsc.anti_replay_window = 0;
801 /* MAC addr for RX */
802 msg_macsec.rxsc.mac_da[0] = rte_bswap32(aqcfg->rxsc.mac[1]);
803 msg_macsec.rxsc.mac_da[1] = rte_bswap32(aqcfg->rxsc.mac[0]);
804 msg_macsec.rxsc.da_mask = 0; /* DA matching disabled */
806 msg_macsec.rxsc.sa_mask = 0;
808 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
813 memset(&msg_macsec, 0, sizeof(msg_macsec));
815 /* Configure TX SA (key words byte-swapped and reversed for FW) */
817 msg_macsec.msg_type = macsec_add_tx_sa_msg;
818 msg_macsec.txsa.index = aqcfg->txsa.idx;
819 msg_macsec.txsa.next_pn = aqcfg->txsa.pn;
821 msg_macsec.txsa.key[0] = rte_bswap32(aqcfg->txsa.key[3]);
822 msg_macsec.txsa.key[1] = rte_bswap32(aqcfg->txsa.key[2]);
823 msg_macsec.txsa.key[2] = rte_bswap32(aqcfg->txsa.key[1]);
824 msg_macsec.txsa.key[3] = rte_bswap32(aqcfg->txsa.key[0]);
826 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
831 memset(&msg_macsec, 0, sizeof(msg_macsec));
833 /* Configure RX SA */
835 msg_macsec.msg_type = macsec_add_rx_sa_msg;
836 msg_macsec.rxsa.index = aqcfg->rxsa.idx;
837 msg_macsec.rxsa.next_pn = aqcfg->rxsa.pn;
839 msg_macsec.rxsa.key[0] = rte_bswap32(aqcfg->rxsa.key[3]);
840 msg_macsec.rxsa.key[1] = rte_bswap32(aqcfg->rxsa.key[2]);
841 msg_macsec.rxsa.key[2] = rte_bswap32(aqcfg->rxsa.key[1]);
842 msg_macsec.rxsa.key[3] = rte_bswap32(aqcfg->rxsa.key[0]);
844 hw->aq_fw_ops->send_macsec_req(hw, &msg_macsec, &response);
/* The atl_macsec_* helpers below only record MACSec parameters in the
 * per-port aq_hw_cfg_s; the settings are pushed to firmware later by
 * atl_dev_configure_macsec() (triggered from the delayed handler).
 * NOTE(review): return statements/braces are elided in this excerpt.
 */
/* Enable MACSec with optional encryption and replay protection. */
852 int atl_macsec_enable(struct rte_eth_dev *dev,
853 uint8_t encr, uint8_t repl_prot)
855 struct aq_hw_cfg_s *cfg =
856 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
858 cfg->aq_macsec.common.macsec_enabled = 1;
859 cfg->aq_macsec.common.encryption_enabled = encr;
860 cfg->aq_macsec.common.replay_protection_enabled = repl_prot;
/* Disable MACSec for the port. */
865 int atl_macsec_disable(struct rte_eth_dev *dev)
867 struct aq_hw_cfg_s *cfg =
868 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
870 cfg->aq_macsec.common.macsec_enabled = 0;
/* Record the Tx secure-channel MAC. The 6-byte MAC is stored right-
 * aligned in the 8-byte field (hence the +2 offset after zeroing).
 */
875 int atl_macsec_config_txsc(struct rte_eth_dev *dev, uint8_t *mac)
877 struct aq_hw_cfg_s *cfg =
878 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
880 memset(&cfg->aq_macsec.txsc.mac, 0, sizeof(cfg->aq_macsec.txsc.mac));
881 memcpy((uint8_t *)&cfg->aq_macsec.txsc.mac + 2, mac,
/* Record the Rx secure-channel MAC and port identifier. */
887 int atl_macsec_config_rxsc(struct rte_eth_dev *dev,
888 uint8_t *mac, uint16_t pi)
890 struct aq_hw_cfg_s *cfg =
891 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
893 memset(&cfg->aq_macsec.rxsc.mac, 0, sizeof(cfg->aq_macsec.rxsc.mac));
894 memcpy((uint8_t *)&cfg->aq_macsec.rxsc.mac + 2, mac,
896 cfg->aq_macsec.rxsc.pi = pi;
/* Record the Tx SA index, association number, packet number and
 * 128-bit key.
 */
901 int atl_macsec_select_txsa(struct rte_eth_dev *dev,
902 uint8_t idx, uint8_t an,
903 uint32_t pn, uint8_t *key)
905 struct aq_hw_cfg_s *cfg =
906 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
908 cfg->aq_macsec.txsa.idx = idx;
909 cfg->aq_macsec.txsa.pn = pn;
910 cfg->aq_macsec.txsa.an = an;
912 memcpy(&cfg->aq_macsec.txsa.key, key, 16);
/* Record the Rx SA index, association number, packet number and
 * 128-bit key.
 */
916 int atl_macsec_select_rxsa(struct rte_eth_dev *dev,
917 uint8_t idx, uint8_t an,
918 uint32_t pn, uint8_t *key)
920 struct aq_hw_cfg_s *cfg =
921 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
923 cfg->aq_macsec.rxsa.idx = idx;
924 cfg->aq_macsec.rxsa.pn = pn;
925 cfg->aq_macsec.rxsa.an = an;
927 memcpy(&cfg->aq_macsec.rxsa.key, key, 16);
/* stats_get callback: refresh hardware counters via firmware, then fill
 * the rte_eth_stats from the DMA counters plus the software per-queue
 * counters kept by the Rx/Tx paths.
 */
932 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
934 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
935 struct aq_hw_s *hw = &adapter->hw;
936 struct atl_sw_stats *swstats = &adapter->sw_stats;
939 hw->aq_fw_ops->update_stats(hw);
941 /* Fill out the rte_eth_stats statistics structure */
942 stats->ipackets = hw->curr_stats.dma_pkt_rc;
943 stats->ibytes = hw->curr_stats.dma_oct_rc;
944 stats->imissed = hw->curr_stats.dpc;
945 stats->ierrors = hw->curr_stats.erpt;
947 stats->opackets = hw->curr_stats.dma_pkt_tc;
948 stats->obytes = hw->curr_stats.dma_oct_tc;
951 stats->rx_nombuf = swstats->rx_nombuf;
/* Copy software-maintained per-queue counters. */
953 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
954 stats->q_ipackets[i] = swstats->q_ipackets[i];
955 stats->q_opackets[i] = swstats->q_opackets[i];
956 stats->q_ibytes[i] = swstats->q_ibytes[i];
957 stats->q_obytes[i] = swstats->q_obytes[i];
958 stats->q_errors[i] = swstats->q_errors[i];
/* stats_reset / xstats_reset callback: resync with firmware, then zero
 * both the cached hardware totals and the software counters.
 */
964 atl_dev_stats_reset(struct rte_eth_dev *dev)
966 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
967 struct aq_hw_s *hw = &adapter->hw;
969 hw->aq_fw_ops->update_stats(hw);
971 /* Reset software totals */
972 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
974 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/* Count the xstats entries applicable to this port: MACSec rows are
 * skipped when the firmware capability bit CAPS_LO_MACSEC is not set.
 * NOTE(review): the count increment and return are elided in this excerpt.
 */
980 atl_dev_xstats_get_count(struct rte_eth_dev *dev)
982 struct atl_adapter *adapter =
983 (struct atl_adapter *)dev->data->dev_private;
985 struct aq_hw_s *hw = &adapter->hw;
986 unsigned int i, count = 0;
988 for (i = 0; i < RTE_DIM(atl_xstats_tbl); i++) {
989 if (atl_xstats_tbl[i].type == XSTATS_TYPE_MACSEC &&
990 ((hw->caps_lo & BIT(CAPS_LO_MACSEC)) == 0))
/* xstats_get_names callback: copy table entry names into the caller's
 * array, bounded by both the requested size and the applicable count.
 */
1000 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1001 struct rte_eth_xstat_name *xstats_names,
1005 unsigned int count = atl_dev_xstats_get_count(dev);
1008 for (i = 0; i < size && i < count; i++) {
1009 snprintf(xstats_names[i].name,
1010 RTE_ETH_XSTATS_NAME_SIZE, "%s",
1011 atl_xstats_tbl[i].name);
/* xstats_get callback: optionally fetch MACSec counters from firmware,
 * then resolve each table entry against either the MSM counter block or
 * the MACSec response, using the stored byte offset.
 * NOTE(review): id assignment, break statements and the return are
 * elided in this excerpt.
 */
1019 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
1022 struct atl_adapter *adapter = dev->data->dev_private;
1023 struct aq_hw_s *hw = &adapter->hw;
1024 struct get_stats req = { 0 };
1025 struct macsec_msg_fw_request msg = { 0 };
1026 struct macsec_msg_fw_response resp = { 0 };
1029 unsigned int count = atl_dev_xstats_get_count(dev);
/* 0xff indices request aggregate statistics from the firmware. */
1034 if (hw->aq_fw_ops->send_macsec_req != NULL) {
1035 req.ingress_sa_index = 0xff;
1036 req.egress_sc_index = 0xff;
1037 req.egress_sa_index = 0xff;
1039 msg.msg_type = macsec_get_stats_msg;
1042 err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1045 for (i = 0; i < n && i < count; i++) {
1048 switch (atl_xstats_tbl[i].type) {
1049 case XSTATS_TYPE_MSM:
1050 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
1051 atl_xstats_tbl[i].offset);
1053 case XSTATS_TYPE_MACSEC:
1056 *(u64 *)((uint8_t *)&resp.stats +
1057 atl_xstats_tbl[i].offset);
/* fw_version_get callback: format the firmware version as
 * "major.minor.build" (bits 31-24, 23-16, 15-0 of the raw word) into
 * fw_version; ret accounts for the trailing NUL so callers can detect
 * truncation. NOTE(review): the final size check/return is elided in
 * this excerpt.
 */
1067 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
1069 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1070 uint32_t fw_ver = 0;
1071 unsigned int ret = 0;
1073 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
1077 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
1078 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
1080 ret += 1; /* add string null-terminator */
/* dev_infos_get callback: advertise queue/MAC limits, offload masks,
 * default ring thresholds, descriptor limits, RSS parameters and the
 * supported link speeds (100M/1G/2.5G/5G/10G).
 */
1089 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1091 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1093 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
1094 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
1096 dev_info->min_rx_bufsize = 1024;
1097 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
1098 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
1099 dev_info->max_vfs = pci_dev->max_vfs;
/* No VMDq / hash-MAC support in this PMD. */
1101 dev_info->max_hash_mac_addrs = 0;
1102 dev_info->max_vmdq_pools = 0;
1103 dev_info->vmdq_queue_num = 0;
1105 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
1107 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
1110 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1111 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
1114 dev_info->default_txconf = (struct rte_eth_txconf) {
1115 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
1118 dev_info->rx_desc_lim = rx_desc_lim;
1119 dev_info->tx_desc_lim = tx_desc_lim;
1121 dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
1122 dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
1123 dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
1125 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
1126 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
1127 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
1128 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* Return the packet-type list handled by the atl Rx burst function;
 * only valid when the active rx_pkt_burst is atl_recv_pkts.
 * NOTE(review): most ptype entries and the returns are elided in this
 * excerpt.
 */
1133 static const uint32_t *
1134 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1136 static const uint32_t ptypes[] = {
1138 RTE_PTYPE_L2_ETHER_ARP,
1139 RTE_PTYPE_L2_ETHER_VLAN,
1149 if (dev->rx_pkt_burst == atl_recv_pkts)
/* Alarm callback scheduled from atl_dev_link_update(): applies the
 * pending MACSec configuration outside interrupt context.
 */
1156 atl_dev_delayed_handler(void *param)
1158 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1160 atl_dev_configure_macsec(dev);
1164 /* return 0 means link status changed, -1 means not changed */
/* link_update callback: query firmware link state, publish it through
 * rte_eth_linkstatus_set(), refresh negotiated flow control on any link
 * event, and schedule the delayed MACSec-config handler.
 * NOTE(review): some return statements are elided in this excerpt.
 */
1166 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
1168 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1169 struct rte_eth_link link, old;
1170 u32 fc = AQ_NIC_FC_OFF;
1173 link.link_status = ETH_LINK_DOWN;
1174 link.link_speed = 0;
1175 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1176 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
1177 memset(&old, 0, sizeof(old));
1179 /* load old link status */
1180 rte_eth_linkstatus_get(dev, &old);
1182 /* read current link status */
1183 err = hw->aq_fw_ops->update_link_status(hw);
1188 if (hw->aq_link_status.mbps == 0) {
1189 /* write default (down) link status */
1190 rte_eth_linkstatus_set(dev, &link);
1191 if (link.link_status == old.link_status)
1196 link.link_status = ETH_LINK_UP;
1197 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1198 link.link_speed = hw->aq_link_status.mbps;
1200 rte_eth_linkstatus_set(dev, &link);
1202 if (link.link_status == old.link_status)
1205 /* Driver has to update flow control settings on RX block
1206 * on any link event.
1207 * We should query FW whether it negotiated FC.
1209 if (hw->aq_fw_ops->get_flow_control) {
1210 hw->aq_fw_ops->get_flow_control(hw, &fc);
1211 hw_atl_b0_set_fc(hw, fc, 0U);
/* Defer MACSec reconfiguration by 1 second via an EAL alarm. */
1214 if (rte_eal_alarm_set(1000 * 1000,
1215 atl_dev_delayed_handler, (void *)dev) < 0)
1216 PMD_DRV_LOG(ERR, "rte_eal_alarm_set fail");
/* Rx filtering mode callbacks: thin wrappers over the L2 filter HAL.
 * NOTE(review): return statements/braces are elided in this excerpt.
 */
1222 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
1224 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1226 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
1232 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
1234 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1236 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
1242 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
1244 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1246 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
/* Allmulticast cannot be turned off while promiscuous mode is on —
 * promiscuous implies accepting all multicast.
 */
1252 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
1254 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1256 if (dev->data->promiscuous == 1)
1257 return 0; /* must remain in all_multicast mode */
1259 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
1265 * It clears the interrupt causes and enables the interrupt.
1266 * It will be called once only during nic initialized.
1269 * Pointer to struct rte_eth_dev.
1271 * Enable or Disable.
1274 * - On success, zero.
1275 * - On failure, a negative value.
1279 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
1281 atl_dev_link_status_print(dev);
1286 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
1293 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
1295 struct atl_interrupt *intr =
1296 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1297 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1300 hw_atl_b0_hw_irq_read(hw, &cause);
1302 atl_disable_intr(hw);
1304 if (cause & BIT(ATL_IRQ_CAUSE_LINK))
1305 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
1311 * It gets and then prints the link status.
1314 * Pointer to struct rte_eth_dev.
1317 * - On success, zero.
1318 * - On failure, a negative value.
1321 atl_dev_link_status_print(struct rte_eth_dev *dev)
1323 struct rte_eth_link link;
1325 memset(&link, 0, sizeof(link));
1326 rte_eth_linkstatus_get(dev, &link);
1327 if (link.link_status) {
1328 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1329 (int)(dev->data->port_id),
1330 (unsigned int)link.link_speed,
1331 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1332 "full-duplex" : "half-duplex");
1334 PMD_DRV_LOG(INFO, " Port %d: Link Down",
1335 (int)(dev->data->port_id));
1341 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1343 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1344 pci_dev->addr.domain,
1346 pci_dev->addr.devid,
1347 pci_dev->addr.function);
1351 PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1355 * It executes link_update after knowing an interrupt occurred.
1358 * Pointer to struct rte_eth_dev.
1361 * - On success, zero.
1362 * - On failure, a negative value.
1365 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1366 struct rte_intr_handle *intr_handle)
1368 struct atl_interrupt *intr =
1369 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1370 struct atl_adapter *adapter = dev->data->dev_private;
1371 struct aq_hw_s *hw = &adapter->hw;
1373 if (!(intr->flags & ATL_FLAG_NEED_LINK_UPDATE))
1376 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1378 /* Notify userapp if link status changed */
1379 if (!atl_dev_link_update(dev, 0)) {
1380 atl_dev_link_status_print(dev);
1381 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
1383 if (hw->aq_fw_ops->send_macsec_req == NULL)
1386 /* Check macsec Keys expired */
1387 struct get_stats req = { 0 };
1388 struct macsec_msg_fw_request msg = { 0 };
1389 struct macsec_msg_fw_response resp = { 0 };
1391 req.ingress_sa_index = 0x0;
1392 req.egress_sc_index = 0x0;
1393 req.egress_sa_index = 0x0;
1394 msg.msg_type = macsec_get_stats_msg;
1397 int err = hw->aq_fw_ops->send_macsec_req(hw, &msg, &resp);
1399 PMD_DRV_LOG(ERR, "send_macsec_req fail");
1402 if (resp.stats.egress_threshold_expired ||
1403 resp.stats.ingress_threshold_expired ||
1404 resp.stats.egress_expired ||
1405 resp.stats.ingress_expired) {
1406 PMD_DRV_LOG(INFO, "RTE_ETH_EVENT_MACSEC");
1407 rte_eth_dev_callback_process(dev,
1408 RTE_ETH_EVENT_MACSEC, NULL);
1412 atl_enable_intr(dev);
1413 rte_intr_ack(intr_handle);
1419 * Interrupt handler triggered by NIC for handling
1420 * specific interrupt.
1423 * Pointer to interrupt handle.
1425 * The address of parameter (struct rte_eth_dev *) regsitered before.
1431 atl_dev_interrupt_handler(void *param)
1433 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1435 atl_dev_interrupt_get_status(dev);
1436 atl_dev_interrupt_action(dev, dev->intr_handle);
1441 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1443 return SFP_EEPROM_SIZE;
1446 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1447 struct rte_dev_eeprom_info *eeprom)
1449 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1450 uint32_t dev_addr = SMBUS_DEVICE_ID;
1452 if (hw->aq_fw_ops->get_eeprom == NULL)
1455 if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1456 eeprom->data == NULL)
1459 if (eeprom->magic > 0x7F)
1463 dev_addr = eeprom->magic;
1465 return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1466 eeprom->length, eeprom->offset);
1469 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1470 struct rte_dev_eeprom_info *eeprom)
1472 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1473 uint32_t dev_addr = SMBUS_DEVICE_ID;
1475 if (hw->aq_fw_ops->set_eeprom == NULL)
1478 if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1479 eeprom->data == NULL)
1482 if (eeprom->magic > 0x7F)
1486 dev_addr = eeprom->magic;
1488 return hw->aq_fw_ops->set_eeprom(hw, dev_addr, eeprom->data,
1489 eeprom->length, eeprom->offset);
1493 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1495 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1499 if (regs->data == NULL) {
1500 regs->length = hw_atl_utils_hw_get_reg_length();
1501 regs->width = sizeof(u32);
1505 /* Only full register dump is supported */
1506 if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1509 err = hw_atl_utils_hw_get_regs(hw, regs->data);
1511 /* Device version */
1512 mif_id = hw_atl_reg_glb_mif_id_get(hw);
1513 regs->version = mif_id & 0xFFU;
1519 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1521 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1522 u32 fc = AQ_NIC_FC_OFF;
1524 if (hw->aq_fw_ops->get_flow_control == NULL)
1527 hw->aq_fw_ops->get_flow_control(hw, &fc);
1529 if (fc == AQ_NIC_FC_OFF)
1530 fc_conf->mode = RTE_FC_NONE;
1531 else if ((fc & AQ_NIC_FC_RX) && (fc & AQ_NIC_FC_TX))
1532 fc_conf->mode = RTE_FC_FULL;
1533 else if (fc & AQ_NIC_FC_RX)
1534 fc_conf->mode = RTE_FC_RX_PAUSE;
1535 else if (fc & AQ_NIC_FC_TX)
1536 fc_conf->mode = RTE_FC_TX_PAUSE;
1542 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1544 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1545 uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1548 if (hw->aq_fw_ops->set_flow_control == NULL)
1551 if (fc_conf->mode == RTE_FC_NONE)
1552 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1553 else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1554 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1555 else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1556 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1557 else if (fc_conf->mode == RTE_FC_FULL)
1558 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1560 if (old_flow_control != hw->aq_nic_cfg->flow_control)
1561 return hw->aq_fw_ops->set_flow_control(hw);
1567 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1568 u8 *mac_addr, bool enable)
1570 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1571 unsigned int h = 0U;
1572 unsigned int l = 0U;
1576 h = (mac_addr[0] << 8) | (mac_addr[1]);
1577 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1578 (mac_addr[4] << 8) | mac_addr[5];
1581 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1582 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1583 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1586 hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1588 err = aq_hw_err_from_flags(hw);
1594 atl_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1595 uint32_t index __rte_unused, uint32_t pool __rte_unused)
1597 if (rte_is_zero_ether_addr(mac_addr)) {
1598 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1602 return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
/* ethdev mac_addr_remove hook: clear and disable the filter slot. */
static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}
/* Replace the primary (slot 0) MAC address.
 *
 * Fix: the original dropped atl_add_mac_addr()'s return value and
 * reported unconditional success, hiding failures (e.g. an all-zero
 * address) from the caller.  Propagate the result instead.
 */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);

	return atl_add_mac_addr(dev, addr, 0, 0);
}
1620 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1622 struct rte_eth_dev_info dev_info;
1624 uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
1626 ret = atl_dev_info_get(dev, &dev_info);
1630 if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
1633 /* update max frame size */
1634 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1640 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1642 struct aq_hw_cfg_s *cfg =
1643 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1644 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1648 PMD_INIT_FUNC_TRACE();
1650 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1651 if (cfg->vlan_filter[i] == vlan_id) {
1653 /* Disable VLAN filter. */
1654 hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1656 /* Clear VLAN filter entry */
1657 cfg->vlan_filter[i] = 0;
1663 /* VLAN_ID was not found. So, nothing to delete. */
1664 if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1667 /* VLAN_ID already exist, or already removed above. Nothing to do. */
1668 if (i != HW_ATL_B0_MAX_VLAN_IDS)
1671 /* Try to found free VLAN filter to add new VLAN_ID */
1672 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1673 if (cfg->vlan_filter[i] == 0)
1677 if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1678 /* We have no free VLAN filter to add new VLAN_ID*/
1683 cfg->vlan_filter[i] = vlan_id;
1684 hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1685 hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1686 hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1689 /* Enable VLAN promisc mode if vlan_filter empty */
1690 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1691 if (cfg->vlan_filter[i] != 0)
1695 hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1701 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1703 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1704 struct aq_hw_cfg_s *cfg =
1705 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1708 PMD_INIT_FUNC_TRACE();
1710 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1711 if (cfg->vlan_filter[i])
1712 hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1718 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1720 struct aq_hw_cfg_s *cfg =
1721 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1722 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1726 PMD_INIT_FUNC_TRACE();
1728 ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1730 cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1732 for (i = 0; i < dev->data->nb_rx_queues; i++)
1733 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1735 if (mask & ETH_VLAN_EXTEND_MASK)
1742 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1745 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1748 PMD_INIT_FUNC_TRACE();
1750 switch (vlan_type) {
1751 case ETH_VLAN_TYPE_INNER:
1752 hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1754 case ETH_VLAN_TYPE_OUTER:
1755 hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1758 PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1766 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1768 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1770 PMD_INIT_FUNC_TRACE();
1772 if (queue_id > dev->data->nb_rx_queues) {
1773 PMD_DRV_LOG(ERR, "Invalid queue id");
1777 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1781 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1782 struct rte_ether_addr *mc_addr_set,
1783 uint32_t nb_mc_addr)
1785 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1788 if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1791 /* Update whole uc filters table */
1792 for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1793 u8 *mac_addr = NULL;
1796 if (i < nb_mc_addr) {
1797 mac_addr = mc_addr_set[i].addr_bytes;
1798 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1799 (mac_addr[4] << 8) | mac_addr[5];
1800 h = (mac_addr[0] << 8) | mac_addr[1];
1803 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1804 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1805 HW_ATL_B0_MAC_MIN + i);
1806 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1807 HW_ATL_B0_MAC_MIN + i);
1808 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1809 HW_ATL_B0_MAC_MIN + i);
1816 atl_reta_update(struct rte_eth_dev *dev,
1817 struct rte_eth_rss_reta_entry64 *reta_conf,
1821 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1822 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1824 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1825 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1826 dev->data->nb_rx_queues - 1);
1828 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1833 atl_reta_query(struct rte_eth_dev *dev,
1834 struct rte_eth_rss_reta_entry64 *reta_conf,
1838 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1840 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1841 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1842 reta_conf->mask = ~0U;
1847 atl_rss_hash_update(struct rte_eth_dev *dev,
1848 struct rte_eth_rss_conf *rss_conf)
1850 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1851 struct aq_hw_cfg_s *cfg =
1852 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1853 static u8 def_rss_key[40] = {
1854 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1855 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1856 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1857 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1858 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1861 cfg->is_rss = !!rss_conf->rss_hf;
1862 if (rss_conf->rss_key) {
1863 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1864 rss_conf->rss_key_len);
1865 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1867 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1868 sizeof(def_rss_key));
1869 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1872 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1873 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1878 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1879 struct rte_eth_rss_conf *rss_conf)
1881 struct aq_hw_cfg_s *cfg =
1882 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1884 rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1885 if (rss_conf->rss_key) {
1886 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1887 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1888 rss_conf->rss_key_len);
1895 is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
1897 if (strcmp(dev->device->driver->name, drv->driver.name))
1904 is_atlantic_supported(struct rte_eth_dev *dev)
1906 return is_device_supported(dev, &rte_atl_pmd);
1909 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1910 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1911 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1912 RTE_LOG_REGISTER(atl_logtype_init, pmd.net.atlantic.init, NOTICE);
1913 RTE_LOG_REGISTER(atl_logtype_driver, pmd.net.atlantic.driver, NOTICE);