1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_ethdev_pci.h>
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
/* Forward declarations for the eth_dev_ops table and PCI driver below. */
/* -- device lifecycle -- */
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int atl_dev_configure(struct rte_eth_dev *dev);
19 static int atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int atl_dev_reset(struct rte_eth_dev *dev);
/* -- RX filtering modes -- */
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
/* -- statistics -- */
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 struct rte_eth_xstat_name *xstats_names,
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 struct rte_eth_stats *stats);
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 struct rte_eth_xstat *stats, unsigned int n);
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
/* -- device information -- */
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 struct rte_eth_dev_info *dev_info);
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
51 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
/* -- VLAN offload/filtering -- */
54 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
55 uint16_t vlan_id, int on);
57 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
59 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
60 uint16_t queue_id, int on);
62 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
63 enum rte_vlan_type vlan_type, uint16_t tpid);
/* -- EEPROM / register dump -- */
66 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
67 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
68 struct rte_dev_eeprom_info *eeprom);
69 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
70 struct rte_dev_eeprom_info *eeprom);
73 static int atl_dev_get_regs(struct rte_eth_dev *dev,
74 struct rte_dev_reg_info *regs);
/* -- flow control -- */
77 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
78 struct rte_eth_fc_conf *fc_conf);
79 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
80 struct rte_eth_fc_conf *fc_conf);
82 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
/* -- interrupt plumbing -- */
85 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
86 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
87 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
88 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
89 struct rte_intr_handle *handle);
90 static void atl_dev_interrupt_handler(void *param);
/* -- MAC address management -- */
93 static int atl_add_mac_addr(struct rte_eth_dev *dev,
94 struct ether_addr *mac_addr,
95 uint32_t index, uint32_t pool);
96 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
97 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
98 struct ether_addr *mac_addr);
100 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
101 struct ether_addr *mc_addr_set,
102 uint32_t nb_mc_addr);
/* -- RSS -- */
105 static int atl_reta_update(struct rte_eth_dev *dev,
106 struct rte_eth_rss_reta_entry64 *reta_conf,
108 static int atl_reta_query(struct rte_eth_dev *dev,
109 struct rte_eth_rss_reta_entry64 *reta_conf,
111 static int atl_rss_hash_update(struct rte_eth_dev *dev,
112 struct rte_eth_rss_conf *rss_conf);
113 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
114 struct rte_eth_rss_conf *rss_conf);
/* -- PCI bus glue -- */
117 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
118 struct rte_pci_device *pci_dev);
119 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
/*
 * Duplicate forward declaration of atl_dev_info_get removed; it is already
 * declared above (original lines 46-47) with an identical signature.
 */
/* Dynamic log type IDs, registered with the DPDK logging subsystem. */
124 int atl_logtype_init;
125 int atl_logtype_driver;
128 * The set of PCI devices this driver supports
/* PCI vendor/device ID table; the zeroed entry terminates the list. */
130 static const struct rte_pci_id pci_id_atl_map[] = {
131 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
132 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
133 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
134 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
135 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
/* AQC10x family */
137 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
138 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
139 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
140 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
141 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
142 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
/* "S" variants */
144 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
145 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
146 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
147 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
148 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
149 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
/* "E" variants */
151 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
152 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
153 { .vendor_id = 0, /* sentinel */ },
/* PCI driver descriptor: requires BAR mapping, link-state-change interrupts
 * and IOVA-as-VA mode.
 */
156 static struct rte_pci_driver rte_atl_pmd = {
157 .id_table = pci_id_atl_map,
158 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
159 RTE_PCI_DRV_IOVA_AS_VA,
160 .probe = eth_atl_pci_probe,
161 .remove = eth_atl_pci_remove,
/* RX offload capabilities advertised in dev_info (VLAN strip, L3/L4
 * checksum, jumbo frames).
 */
164 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
165 | DEV_RX_OFFLOAD_IPV4_CKSUM \
166 | DEV_RX_OFFLOAD_UDP_CKSUM \
167 | DEV_RX_OFFLOAD_TCP_CKSUM \
168 | DEV_RX_OFFLOAD_JUMBO_FRAME)
/* TX offload capabilities advertised in dev_info (VLAN insert, L3/L4
 * checksum, TSO, multi-segment packets).
 */
170 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
171 | DEV_TX_OFFLOAD_IPV4_CKSUM \
172 | DEV_TX_OFFLOAD_UDP_CKSUM \
173 | DEV_TX_OFFLOAD_TCP_CKSUM \
174 | DEV_TX_OFFLOAD_TCP_TSO \
175 | DEV_TX_OFFLOAD_MULTI_SEGS)
/* RX descriptor ring limits reported via dev_info. */
177 static const struct rte_eth_desc_lim rx_desc_lim = {
178 .nb_max = ATL_MAX_RING_DESC,
179 .nb_min = ATL_MIN_RING_DESC,
180 .nb_align = ATL_RXD_ALIGN,
/* TX descriptor ring limits, including per-packet segment caps. */
183 static const struct rte_eth_desc_lim tx_desc_lim = {
184 .nb_max = ATL_MAX_RING_DESC,
185 .nb_min = ATL_MIN_RING_DESC,
186 .nb_align = ATL_TXD_ALIGN,
187 .nb_seg_max = ATL_TX_MAX_SEG,
188 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* Builds one xstats table entry from a field of struct aq_stats_s: the
 * stringified field name plus its byte offset into the stats structure.
 */
191 #define ATL_XSTATS_FIELD(name) { \
193 offsetof(struct aq_stats_s, name) \
/* Maps an xstat name to its offset in struct aq_stats_s. */
196 struct atl_xstats_tbl_s {
/* Extended statistics exposed to rte_eth_xstats_get(); names mirror the
 * firmware counter fields in struct aq_stats_s.
 */
201 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
202 ATL_XSTATS_FIELD(uprc),
203 ATL_XSTATS_FIELD(mprc),
204 ATL_XSTATS_FIELD(bprc),
205 ATL_XSTATS_FIELD(erpt),
206 ATL_XSTATS_FIELD(uptc),
207 ATL_XSTATS_FIELD(mptc),
208 ATL_XSTATS_FIELD(bptc),
209 ATL_XSTATS_FIELD(erpr),
210 ATL_XSTATS_FIELD(ubrc),
211 ATL_XSTATS_FIELD(ubtc),
212 ATL_XSTATS_FIELD(mbrc),
213 ATL_XSTATS_FIELD(mbtc),
214 ATL_XSTATS_FIELD(bbrc),
215 ATL_XSTATS_FIELD(bbtc),
/* ethdev operations table wired into every atlantic port at init time. */
218 static const struct eth_dev_ops atl_eth_dev_ops = {
/* device lifecycle */
219 .dev_configure = atl_dev_configure,
220 .dev_start = atl_dev_start,
221 .dev_stop = atl_dev_stop,
222 .dev_set_link_up = atl_dev_set_link_up,
223 .dev_set_link_down = atl_dev_set_link_down,
224 .dev_close = atl_dev_close,
225 .dev_reset = atl_dev_reset,
/* RX filtering modes */
228 .promiscuous_enable = atl_dev_promiscuous_enable,
229 .promiscuous_disable = atl_dev_promiscuous_disable,
230 .allmulticast_enable = atl_dev_allmulticast_enable,
231 .allmulticast_disable = atl_dev_allmulticast_disable,
234 .link_update = atl_dev_link_update,
236 .get_reg = atl_dev_get_regs,
/* statistics (xstats_reset intentionally shares the stats_reset handler) */
239 .stats_get = atl_dev_stats_get,
240 .xstats_get = atl_dev_xstats_get,
241 .xstats_get_names = atl_dev_xstats_get_names,
242 .stats_reset = atl_dev_stats_reset,
243 .xstats_reset = atl_dev_stats_reset,
245 .fw_version_get = atl_fw_version_get,
246 .dev_infos_get = atl_dev_info_get,
247 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
249 .mtu_set = atl_dev_mtu_set,
/* VLAN */
252 .vlan_filter_set = atl_vlan_filter_set,
253 .vlan_offload_set = atl_vlan_offload_set,
254 .vlan_tpid_set = atl_vlan_tpid_set,
255 .vlan_strip_queue_set = atl_vlan_strip_queue_set,
/* queue management (implemented in atl_rxtx.c) */
258 .rx_queue_start = atl_rx_queue_start,
259 .rx_queue_stop = atl_rx_queue_stop,
260 .rx_queue_setup = atl_rx_queue_setup,
261 .rx_queue_release = atl_rx_queue_release,
263 .tx_queue_start = atl_tx_queue_start,
264 .tx_queue_stop = atl_tx_queue_stop,
265 .tx_queue_setup = atl_tx_queue_setup,
266 .tx_queue_release = atl_tx_queue_release,
268 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
269 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
271 .rx_queue_count = atl_rx_queue_count,
272 .rx_descriptor_status = atl_dev_rx_descriptor_status,
273 .tx_descriptor_status = atl_dev_tx_descriptor_status,
/* EEPROM access */
276 .get_eeprom_length = atl_dev_get_eeprom_length,
277 .get_eeprom = atl_dev_get_eeprom,
278 .set_eeprom = atl_dev_set_eeprom,
/* flow control */
281 .flow_ctrl_get = atl_flow_ctrl_get,
282 .flow_ctrl_set = atl_flow_ctrl_set,
/* MAC address management */
285 .mac_addr_add = atl_add_mac_addr,
286 .mac_addr_remove = atl_remove_mac_addr,
287 .mac_addr_set = atl_set_default_mac_addr,
288 .set_mc_addr_list = atl_dev_set_mc_addr_list,
289 .rxq_info_get = atl_rxq_info_get,
290 .txq_info_get = atl_txq_info_get,
/* RSS */
292 .reta_update = atl_reta_update,
293 .reta_query = atl_reta_query,
294 .rss_hash_update = atl_rss_hash_update,
295 .rss_hash_conf_get = atl_rss_hash_conf_get,
/* Thin wrapper: issue a full B0 hardware reset via the HW abstraction. */
298 static inline int32_t
299 atl_reset_hw(struct aq_hw_s *hw)
301 return hw_atl_b0_hw_reset(hw);
/* Unmask all interrupt sources in the ITR mask register (lsw). */
305 atl_enable_intr(struct rte_eth_dev *dev)
307 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
309 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask (disable) all interrupt sources; counterpart of atl_enable_intr(). */
313 atl_disable_intr(struct aq_hw_s *hw)
315 PMD_INIT_FUNC_TRACE();
316 hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
/*
 * Per-port init callback invoked from the PCI probe path.
 * Wires in dev_ops and burst functions, fills the hardcoded HW config,
 * reads the permanent MAC from firmware and registers the interrupt
 * handler. Secondary processes return early since the primary already
 * did all the shared setup.
 */
320 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
322 struct atl_adapter *adapter =
323 (struct atl_adapter *)eth_dev->data->dev_private;
324 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
325 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
326 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
329 PMD_INIT_FUNC_TRACE();
331 eth_dev->dev_ops = &atl_eth_dev_ops;
332 eth_dev->rx_pkt_burst = &atl_recv_pkts;
333 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
334 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
336 /* For secondary processes, the primary process has done all the work */
337 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
340 /* Vendor and Device ID need to be set before init of shared code */
341 hw->device_id = pci_dev->id.device_id;
342 hw->vendor_id = pci_dev->id.vendor_id;
343 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
345 /* Hardware configuration - hardcode */
346 adapter->hw_cfg.is_lro = false;
347 adapter->hw_cfg.wol = false;
348 adapter->hw_cfg.is_rss = false;
349 adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
351 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
/* default: both RX and TX pause frames enabled */
357 adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
358 adapter->hw_cfg.aq_rss.indirection_table_size =
359 HW_ATL_B0_RSS_REDIRECTION_MAX;
361 hw->aq_nic_cfg = &adapter->hw_cfg;
363 /* disable interrupt */
364 atl_disable_intr(hw);
366 /* Allocate memory for storing MAC addresses */
367 eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
368 if (eth_dev->data->mac_addrs == NULL) {
369 PMD_INIT_LOG(ERR, "MAC Malloc failed");
/* Resolve the firmware ops vtable for this FW version. */
373 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
377 /* Copy the permanent MAC address */
378 if (hw->aq_fw_ops->get_mac_permanent(hw,
379 eth_dev->data->mac_addrs->addr_bytes) != 0)
382 /* Reset the hw statistics */
383 atl_dev_stats_reset(eth_dev);
385 rte_intr_callback_register(intr_handle,
386 atl_dev_interrupt_handler, eth_dev);
388 /* enable uio/vfio intr/eventfd mapping */
389 rte_intr_enable(intr_handle);
391 /* enable support intr */
392 atl_enable_intr(eth_dev);
/*
 * Per-port teardown: closes the device if still running, unhooks burst
 * functions, unregisters the interrupt callback and frees the MAC table.
 * Secondary processes return early.
 */
398 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
400 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
401 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
404 PMD_INIT_FUNC_TRACE();
406 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
409 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
411 if (hw->adapter_stopped == 0)
412 atl_dev_close(eth_dev);
414 eth_dev->dev_ops = NULL;
415 eth_dev->rx_pkt_burst = NULL;
416 eth_dev->tx_pkt_burst = NULL;
418 /* disable uio intr before callback unregister */
419 rte_intr_disable(intr_handle);
420 rte_intr_callback_unregister(intr_handle,
421 atl_dev_interrupt_handler, eth_dev);
423 rte_free(eth_dev->data->mac_addrs);
424 eth_dev->data->mac_addrs = NULL;
/* PCI probe: allocate an ethdev with atl_adapter private data and run init. */
430 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
431 struct rte_pci_device *pci_dev)
433 return rte_eth_dev_pci_generic_probe(pci_dev,
434 sizeof(struct atl_adapter), eth_atl_dev_init);
/* PCI remove: generic teardown routed through eth_atl_dev_uninit(). */
438 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
440 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
/* dev_configure hook: only records that a link status refresh is needed
 * after the next start; actual HW programming happens in atl_dev_start().
 */
444 atl_dev_configure(struct rte_eth_dev *dev)
446 struct atl_interrupt *intr =
447 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
449 PMD_INIT_FUNC_TRACE();
451 /* set flag to update link status after init */
452 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
458 * Configure device link speed and setup link.
459 * It returns 0 on success.
/*
 * dev_start: resets and re-initializes the adapter, sets up the RX-queue
 * interrupt vector mapping, initializes RX rings, starts queues, brings
 * the link up and re-enables interrupts. Fixed-speed configuration is
 * rejected — only autoneg speed masks are supported.
 */
462 atl_dev_start(struct rte_eth_dev *dev)
464 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
465 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
466 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
467 uint32_t intr_vector = 0;
471 PMD_INIT_FUNC_TRACE();
473 /* set adapter started */
474 hw->adapter_stopped = 0;
476 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
478 "Invalid link_speeds for port %u, fix speed not supported",
483 /* disable uio/vfio intr/eventfd mapping */
484 rte_intr_disable(intr_handle);
486 /* reinitialize adapter
487 * this calls reset and start
489 status = atl_reset_hw(hw);
493 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
495 hw_atl_b0_hw_start(hw);
496 /* check and configure queue intr-vector mapping */
497 if ((rte_intr_cap_multiple(intr_handle) ||
498 !RTE_ETH_DEV_SRIOV(dev).active) &&
499 dev->data->dev_conf.intr_conf.rxq != 0) {
500 intr_vector = dev->data->nb_rx_queues;
501 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
502 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
503 ATL_MAX_INTR_QUEUE_NUM);
506 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
507 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
/* Allocate one interrupt-vector slot per RX queue if eventfds are used. */
512 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
513 intr_handle->intr_vec = rte_zmalloc("intr_vec",
514 dev->data->nb_rx_queues * sizeof(int), 0);
515 if (intr_handle->intr_vec == NULL) {
516 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
517 " intr_vec", dev->data->nb_rx_queues);
522 /* initialize transmission unit */
525 /* This can fail when allocating mbufs for descriptor rings */
526 err = atl_rx_init(dev);
528 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
532 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
533 hw->fw_ver_actual >> 24,
534 (hw->fw_ver_actual >> 16) & 0xFF,
535 hw->fw_ver_actual & 0xFFFF);
536 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
538 err = atl_start_queues(dev);
540 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
544 err = atl_dev_set_link_up(dev);
546 err = hw->aq_fw_ops->update_link_status(hw);
551 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
/* LSC interrupt only works when interrupt multiplexing is available. */
556 if (rte_intr_allow_others(intr_handle)) {
557 /* check if lsc interrupt is enabled */
558 if (dev->data->dev_conf.intr_conf.lsc != 0)
559 atl_dev_lsc_interrupt_setup(dev, true);
561 atl_dev_lsc_interrupt_setup(dev, false);
563 rte_intr_callback_unregister(intr_handle,
564 atl_dev_interrupt_handler, dev);
565 if (dev->data->dev_conf.intr_conf.lsc != 0)
566 PMD_INIT_LOG(INFO, "lsc won't enable because of"
567 " no intr multiplex");
570 /* check if rxq interrupt is enabled */
571 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
572 rte_intr_dp_is_en(intr_handle))
573 atl_dev_rxq_interrupt_setup(dev);
575 /* enable uio/vfio intr/eventfd mapping */
576 rte_intr_enable(intr_handle);
578 /* resume enabled intr since hw reset */
579 atl_enable_intr(dev);
/* error path: stop the queues that were already started */
584 atl_stop_queues(dev);
589 * Stop device: disable rx and tx functions to allow for reconfiguring.
/*
 * dev_stop: masks interrupts, stops all queues, clears the recorded link
 * status and releases the RX-queue interrupt vector mapping.
 */
592 atl_dev_stop(struct rte_eth_dev *dev)
594 struct rte_eth_link link;
596 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
597 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
598 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
600 PMD_INIT_FUNC_TRACE();
602 /* disable interrupts */
603 atl_disable_intr(hw);
607 hw->adapter_stopped = 1;
609 atl_stop_queues(dev);
611 /* Clear stored conf */
612 dev->data->scattered_rx = 0;
615 /* Clear recorded link status */
616 memset(&link, 0, sizeof(link));
617 rte_eth_linkstatus_set(dev, &link);
619 if (!rte_intr_allow_others(intr_handle))
620 /* resume to the default handler */
621 rte_intr_callback_register(intr_handle,
622 atl_dev_interrupt_handler,
625 /* Clean datapath event and queue/vec mapping */
626 rte_intr_efd_disable(intr_handle);
627 if (intr_handle->intr_vec != NULL) {
628 rte_free(intr_handle->intr_vec);
629 intr_handle->intr_vec = NULL;
634 * Set device link up: enable tx.
/*
 * Translates the ethdev link_speeds bitmap into the firmware's AQ_NIC_RATE
 * mask and asks firmware to (re)negotiate. AUTONEG uses the full speed mask
 * from the hardcoded NIC config.
 */
637 atl_dev_set_link_up(struct rte_eth_dev *dev)
639 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
640 uint32_t link_speeds = dev->data->dev_conf.link_speeds;
641 uint32_t speed_mask = 0;
643 if (link_speeds == ETH_LINK_SPEED_AUTONEG) {
644 speed_mask = hw->aq_nic_cfg->link_speed_msk;
646 if (link_speeds & ETH_LINK_SPEED_10G)
647 speed_mask |= AQ_NIC_RATE_10G;
648 if (link_speeds & ETH_LINK_SPEED_5G)
649 speed_mask |= AQ_NIC_RATE_5G;
650 if (link_speeds & ETH_LINK_SPEED_1G)
651 speed_mask |= AQ_NIC_RATE_1G;
652 if (link_speeds & ETH_LINK_SPEED_2_5G)
653 speed_mask |= AQ_NIC_RATE_2G5;
654 if (link_speeds & ETH_LINK_SPEED_100M)
655 speed_mask |= AQ_NIC_RATE_100M;
658 return hw->aq_fw_ops->set_link_speed(hw, speed_mask);
662 * Set device link down: disable tx.
/* An empty speed mask tells the firmware to drop the link. */
665 atl_dev_set_link_down(struct rte_eth_dev *dev)
667 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
669 return hw->aq_fw_ops->set_link_speed(hw, 0);
673 * Reset and stop device.
/* dev_close: stop the port and release all queue resources. */
676 atl_dev_close(struct rte_eth_dev *dev)
678 PMD_INIT_FUNC_TRACE();
682 atl_free_queues(dev);
/* dev_reset: full uninit followed by re-init of this port. */
686 atl_dev_reset(struct rte_eth_dev *dev)
690 ret = eth_atl_dev_uninit(dev);
694 ret = eth_atl_dev_init(dev);
/*
 * stats_get: refreshes counters from firmware, then fills rte_eth_stats
 * from the HW DMA counters plus software-tracked per-queue counters.
 */
701 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
703 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
704 struct aq_hw_s *hw = &adapter->hw;
705 struct atl_sw_stats *swstats = &adapter->sw_stats;
/* Pull fresh counter values from firmware into hw->curr_stats. */
708 hw->aq_fw_ops->update_stats(hw);
710 /* Fill out the rte_eth_stats statistics structure */
711 stats->ipackets = hw->curr_stats.dma_pkt_rc;
712 stats->ibytes = hw->curr_stats.dma_oct_rc;
713 stats->imissed = hw->curr_stats.dpc;
714 stats->ierrors = hw->curr_stats.erpt;
716 stats->opackets = hw->curr_stats.dma_pkt_tc;
717 stats->obytes = hw->curr_stats.dma_oct_tc;
720 stats->rx_nombuf = swstats->rx_nombuf;
/* Per-queue counters are maintained in software by the burst functions. */
722 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
723 stats->q_ipackets[i] = swstats->q_ipackets[i];
724 stats->q_opackets[i] = swstats->q_opackets[i];
725 stats->q_ibytes[i] = swstats->q_ibytes[i];
726 stats->q_obytes[i] = swstats->q_obytes[i];
727 stats->q_errors[i] = swstats->q_errors[i];
/* stats_reset (also used for xstats_reset): syncs firmware counters then
 * zeroes the driver-side HW snapshot and software per-queue totals.
 */
733 atl_dev_stats_reset(struct rte_eth_dev *dev)
735 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
736 struct aq_hw_s *hw = &adapter->hw;
738 hw->aq_fw_ops->update_stats(hw);
740 /* Reset software totals */
741 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
743 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/* xstats_get_names: copies names from atl_xstats_tbl; per the ethdev
 * contract a NULL buffer returns the number of available xstats.
 */
747 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
748 struct rte_eth_xstat_name *xstats_names,
754 return RTE_DIM(atl_xstats_tbl);
756 for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
757 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
758 atl_xstats_tbl[i].name);
/* xstats_get: reads each counter out of hw->curr_stats using the byte
 * offsets recorded in atl_xstats_tbl.
 */
764 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
767 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
768 struct aq_hw_s *hw = &adapter->hw;
774 for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
776 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
777 atl_xstats_tbl[i].offset);
/* fw_version_get: formats the firmware version as "major.minor.build"
 * (8/8/16-bit fields). Returns the number of bytes needed including the
 * NUL terminator, per the ethdev convention.
 */
784 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
786 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
788 unsigned int ret = 0;
790 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
794 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
795 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
797 ret += 1; /* add string null-terminator */
/* dev_infos_get: reports queue counts, MTU/MAC limits, offload capability
 * masks, descriptor limits, RSS parameters and supported link speeds.
 */
806 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
808 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
810 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
811 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
813 dev_info->min_rx_bufsize = 1024;
814 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
815 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
816 dev_info->max_vfs = pci_dev->max_vfs;
/* no MAC hash filtering or VMDq support */
818 dev_info->max_hash_mac_addrs = 0;
819 dev_info->max_vmdq_pools = 0;
820 dev_info->vmdq_queue_num = 0;
822 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
824 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
827 dev_info->default_rxconf = (struct rte_eth_rxconf) {
828 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
831 dev_info->default_txconf = (struct rte_eth_txconf) {
832 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
835 dev_info->rx_desc_lim = rx_desc_lim;
836 dev_info->tx_desc_lim = tx_desc_lim;
838 dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
839 dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
840 dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
/* multi-gig NIC: 100M / 1G / 2.5G / 5G / 10G */
842 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
843 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
844 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
845 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* Returns the packet-type list only when the standard RX burst function is
 * in use (ptype parsing is done there); otherwise falls through.
 */
848 static const uint32_t *
849 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
851 static const uint32_t ptypes[] = {
853 RTE_PTYPE_L2_ETHER_ARP,
854 RTE_PTYPE_L2_ETHER_VLAN,
864 if (dev->rx_pkt_burst == atl_recv_pkts)
870 /* return 0 means link status changed, -1 means not changed */
/*
 * link_update: queries firmware for the current link state and publishes
 * it atomically via rte_eth_linkstatus_set(); a zero mbps reading means
 * the link is down.
 */
872 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
874 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
875 struct atl_interrupt *intr =
876 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
877 struct rte_eth_link link, old;
880 link.link_status = ETH_LINK_DOWN;
882 link.link_duplex = ETH_LINK_FULL_DUPLEX;
883 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
884 memset(&old, 0, sizeof(old));
886 /* load old link status */
887 rte_eth_linkstatus_get(dev, &old);
889 /* read current link status */
890 err = hw->aq_fw_ops->update_link_status(hw);
895 if (hw->aq_link_status.mbps == 0) {
896 /* write default (down) link status */
897 rte_eth_linkstatus_set(dev, &link);
898 if (link.link_status == old.link_status)
903 intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
905 link.link_status = ETH_LINK_UP;
906 link.link_duplex = ETH_LINK_FULL_DUPLEX;
907 link.link_speed = hw->aq_link_status.mbps;
909 rte_eth_linkstatus_set(dev, &link);
911 if (link.link_status == old.link_status)
/* Enable L2 promiscuous mode in the receive packet filter. */
918 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
920 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
922 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
/* Disable L2 promiscuous mode in the receive packet filter. */
926 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
928 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
930 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
/* Accept all multicast packets regardless of the multicast filter. */
934 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
936 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
938 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
/* Stop accepting all multicast — unless promiscuous mode is active, which
 * implies all-multicast and must not be silently narrowed here.
 */
942 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
944 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
946 if (dev->data->promiscuous == 1)
947 return; /* must remain in all_multicast mode */
949 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
953 * It clears the interrupt causes and enables the interrupt.
954 * It will be called once only during nic initialized.
957 * Pointer to struct rte_eth_dev.
962 * - On success, zero.
963 * - On failure, a negative value.
/* NOTE(review): 'on' is unused in the visible body — only the current link
 * status is printed here; confirm against the full source.
 */
967 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
969 atl_dev_link_status_print(dev);
/* RX-queue interrupt setup hook; device argument is unused here. */
974 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Reads the pending IRQ cause register, masks further interrupts, and
 * records whether a link-status update is required in intr->flags.
 */
981 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
983 struct atl_interrupt *intr =
984 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
985 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
988 hw_atl_b0_hw_irq_read(hw, &cause);
990 atl_disable_intr(hw);
991 intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
992 ATL_FLAG_NEED_LINK_UPDATE : 0;
998 * It gets and then prints the link status.
1001 * Pointer to struct rte_eth_dev.
1004 * - On success, zero.
1005 * - On failure, a negative value.
1008 atl_dev_link_status_print(struct rte_eth_dev *dev)
1010 struct rte_eth_link link;
1012 memset(&link, 0, sizeof(link));
1013 rte_eth_linkstatus_get(dev, &link);
1014 if (link.link_status) {
1015 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1016 (int)(dev->data->port_id),
1017 (unsigned int)link.link_speed,
1018 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1019 "full-duplex" : "half-duplex");
1021 PMD_DRV_LOG(INFO, " Port %d: Link Down",
1022 (int)(dev->data->port_id));
/* Also log the PCI address for easier correlation with lspci output. */
1028 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1030 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1031 pci_dev->addr.domain,
1033 pci_dev->addr.devid,
1034 pci_dev->addr.function);
1038 PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1042 * It executes link_update after knowing an interrupt occurred.
1045 * Pointer to struct rte_eth_dev.
1048 * - On success, zero.
1049 * - On failure, a negative value.
1052 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1053 struct rte_intr_handle *intr_handle)
1055 struct atl_interrupt *intr =
1056 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
/* Handle a link-status change: refresh, log it and notify applications. */
1058 if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1059 atl_dev_link_update(dev, 0);
1060 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1061 atl_dev_link_status_print(dev);
1062 _rte_eth_dev_callback_process(dev,
1063 RTE_ETH_EVENT_INTR_LSC, NULL);
/* Re-arm interrupts that were masked in atl_dev_interrupt_get_status(). */
1066 atl_enable_intr(dev);
1067 rte_intr_enable(intr_handle);
1073 * Interrupt handler triggered by NIC for handling
1074 * specific interrupt.
1077 * Pointer to interrupt handle.
1079 * The address of parameter (struct rte_eth_dev *) registered before.
1085 atl_dev_interrupt_handler(void *param)
1087 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
1089 atl_dev_interrupt_get_status(dev);
1090 atl_dev_interrupt_action(dev, dev->intr_handle);
/* Size in bytes of the SFP module EEPROM exposed via get/set_eeprom. */
1093 #define SFP_EEPROM_SIZE 0xff
1096 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1098 return SFP_EEPROM_SIZE;
/* get_eeprom: reads from the SFP EEPROM over SMBus via firmware. A nonzero
 * eeprom->magic overrides the default SMBus device address.
 */
1101 int atl_dev_get_eeprom(struct rte_eth_dev *dev,
1102 struct rte_dev_eeprom_info *eeprom)
1104 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1105 uint32_t dev_addr = SMBUS_DEVICE_ID;
1107 if (hw->aq_fw_ops->get_eeprom == NULL)
/* Reject reads that run past the EEPROM or have no destination buffer. */
1110 if (eeprom->length + eeprom->offset > SFP_EEPROM_SIZE ||
1111 eeprom->data == NULL)
1115 dev_addr = eeprom->magic;
1117 return hw->aq_fw_ops->get_eeprom(hw, dev_addr, eeprom->data,
1118 eeprom->length, eeprom->offset);
/* set_eeprom: writes the SFP EEPROM via firmware; only whole-EEPROM writes
 * are accepted. A nonzero eeprom->magic overrides the SMBus address.
 */
1121 int atl_dev_set_eeprom(struct rte_eth_dev *dev,
1122 struct rte_dev_eeprom_info *eeprom)
1124 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1125 uint32_t dev_addr = SMBUS_DEVICE_ID;
1127 if (hw->aq_fw_ops->set_eeprom == NULL)
1130 if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1134 dev_addr = eeprom->magic;
1136 return hw->aq_fw_ops->set_eeprom(hw, dev_addr,
1137 eeprom->data, eeprom->length);
/* get_reg: with a NULL data pointer only reports the dump size; otherwise
 * performs a full register dump and stamps the MIF id as the version.
 */
1141 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1143 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1147 if (regs->data == NULL) {
1148 regs->length = hw_atl_utils_hw_get_reg_length();
1149 regs->width = sizeof(u32);
1153 /* Only full register dump is supported */
1154 if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1157 err = hw_atl_utils_hw_get_regs(hw, regs->data);
1159 /* Device version */
1160 mif_id = hw_atl_reg_glb_mif_id_get(hw);
1161 regs->version = mif_id & 0xFFU;
1167 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1169 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1171 if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1172 fc_conf->mode = RTE_FC_NONE;
1173 else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1174 fc_conf->mode = RTE_FC_FULL;
1175 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1176 fc_conf->mode = RTE_FC_RX_PAUSE;
1177 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1178 fc_conf->mode = RTE_FC_TX_PAUSE;
/* flow_ctrl_set: maps RTE_FC_* modes onto AQ_NIC_FC_* flags and pushes the
 * change to firmware only when the configuration actually changed.
 */
1184 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1186 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1187 uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
1190 if (hw->aq_fw_ops->set_flow_control == NULL)
1193 if (fc_conf->mode == RTE_FC_NONE)
1194 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1195 else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1196 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1197 else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1198 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1199 else if (fc_conf->mode == RTE_FC_FULL)
1200 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1202 if (old_flow_control != hw->aq_nic_cfg->flow_control)
1203 return hw->aq_fw_ops->set_flow_control(hw);
/* Programs (enable=true) or clears a unicast MAC filter slot. The 6-byte
 * address is split into a 16-bit high half and 32-bit low half as the
 * hardware registers expect.
 */
1209 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1210 u8 *mac_addr, bool enable)
1212 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1213 unsigned int h = 0U;
1214 unsigned int l = 0U;
1218 h = (mac_addr[0] << 8) | (mac_addr[1]);
1219 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1220 (mac_addr[4] << 8) | mac_addr[5];
/* Disable the filter while rewriting it, then re-enable if requested. */
1223 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1224 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1225 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1228 hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1230 err = aq_hw_err_from_flags(hw);
/* mac_addr_add: validates and installs a unicast MAC filter; the all-zero
 * address is rejected.
 */
1236 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1237 uint32_t index __rte_unused, uint32_t pool __rte_unused)
1239 if (is_zero_ether_addr(mac_addr)) {
1240 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1244 return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
/* mac_addr_remove: disable the unicast filter at the given slot. */
1248 atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
1250 atl_update_mac_addr(dev, index, NULL, false);
/* mac_addr_set: replace the default MAC address stored in slot 0. */
1254 atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
1256 atl_remove_mac_addr(dev, 0);
1257 atl_add_mac_addr(dev, addr, 0, 0);
/* mtu_set: validates the MTU against device limits and records the new
 * maximum frame size (MTU + Ethernet header + CRC) in the RX config.
 */
1262 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1264 struct rte_eth_dev_info dev_info;
1265 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1267 atl_dev_info_get(dev, &dev_info);
1269 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1272 /* update max frame size */
1273 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
/*
 * vlan_filter_set: adds (on) or removes (off) a VLAN ID in the hardware
 * VLAN filter table, tracked in software via cfg->vlan_filter[]. When the
 * table becomes empty, VLAN promiscuous mode is enabled so untagged
 * filtering does not silently drop all tagged traffic.
 */
1279 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1281 struct aq_hw_cfg_s *cfg =
1282 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1283 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1287 PMD_INIT_FUNC_TRACE();
/* Look for an existing entry with this VLAN ID. */
1289 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1290 if (cfg->vlan_filter[i] == vlan_id) {
1292 /* Disable VLAN filter. */
1293 hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1295 /* Clear VLAN filter entry */
1296 cfg->vlan_filter[i] = 0;
1302 /* VLAN_ID was not found. So, nothing to delete. */
1303 if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1306 /* VLAN_ID already exist, or already removed above. Nothing to do. */
1307 if (i != HW_ATL_B0_MAX_VLAN_IDS)
1310 /* Try to find a free VLAN filter slot for the new VLAN_ID */
1311 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1312 if (cfg->vlan_filter[i] == 0)
1316 if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1317 /* We have no free VLAN filter to add new VLAN_ID*/
/* Program and enable the filter in the chosen slot. */
1322 cfg->vlan_filter[i] = vlan_id;
1323 hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1324 hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1325 hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1328 /* Enable VLAN promisc mode if vlan_filter empty */
1329 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1330 if (cfg->vlan_filter[i] != 0)
1334 hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1340 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1342 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1343 struct aq_hw_cfg_s *cfg =
1344 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1347 PMD_INIT_FUNC_TRACE();
1349 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1350 if (cfg->vlan_filter[i])
1351 hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1357 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1359 struct aq_hw_cfg_s *cfg =
1360 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1361 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1365 PMD_INIT_FUNC_TRACE();
1367 ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1369 cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1371 for (i = 0; i < dev->data->nb_rx_queues; i++)
1372 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1374 if (mask & ETH_VLAN_EXTEND_MASK)
1381 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1384 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1387 PMD_INIT_FUNC_TRACE();
1389 switch (vlan_type) {
1390 case ETH_VLAN_TYPE_INNER:
1391 hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1393 case ETH_VLAN_TYPE_OUTER:
1394 hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1397 PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1405 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1407 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1409 PMD_INIT_FUNC_TRACE();
1411 if (queue_id > dev->data->nb_rx_queues) {
1412 PMD_DRV_LOG(ERR, "Invalid queue id");
1416 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1420 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1421 struct ether_addr *mc_addr_set,
1422 uint32_t nb_mc_addr)
1424 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1427 if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1430 /* Update whole uc filters table */
1431 for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1432 u8 *mac_addr = NULL;
1435 if (i < nb_mc_addr) {
1436 mac_addr = mc_addr_set[i].addr_bytes;
1437 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1438 (mac_addr[4] << 8) | mac_addr[5];
1439 h = (mac_addr[0] << 8) | mac_addr[1];
1442 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1443 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1444 HW_ATL_B0_MAC_MIN + i);
1445 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1446 HW_ATL_B0_MAC_MIN + i);
1447 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1448 HW_ATL_B0_MAC_MIN + i);
1455 atl_reta_update(struct rte_eth_dev *dev,
1456 struct rte_eth_rss_reta_entry64 *reta_conf,
1460 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1461 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1463 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1464 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1465 dev->data->nb_rx_queues - 1);
1467 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1472 atl_reta_query(struct rte_eth_dev *dev,
1473 struct rte_eth_rss_reta_entry64 *reta_conf,
1477 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1479 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1480 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1481 reta_conf->mask = ~0U;
1486 atl_rss_hash_update(struct rte_eth_dev *dev,
1487 struct rte_eth_rss_conf *rss_conf)
1489 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1490 struct aq_hw_cfg_s *cfg =
1491 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1492 static u8 def_rss_key[40] = {
1493 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1494 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1495 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1496 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1497 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1500 cfg->is_rss = !!rss_conf->rss_hf;
1501 if (rss_conf->rss_key) {
1502 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1503 rss_conf->rss_key_len);
1504 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1506 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1507 sizeof(def_rss_key));
1508 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1511 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1512 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1517 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1518 struct rte_eth_rss_conf *rss_conf)
1520 struct aq_hw_cfg_s *cfg =
1521 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1523 rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1524 if (rss_conf->rss_key) {
1525 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1526 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1527 rss_conf->rss_key_len);
/*
 * Register the PMD with the PCI bus, export its PCI device-id table,
 * and declare the kernel module dependency (igb_uio or uio_pci_generic).
 */
1533 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1534 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1535 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1537 RTE_INIT(atl_init_log)
1539 atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1540 if (atl_logtype_init >= 0)
1541 rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1542 atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1543 if (atl_logtype_driver >= 0)
1544 rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);