1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Aquantia Corporation
5 #include <rte_ethdev_pci.h>
7 #include "atl_ethdev.h"
8 #include "atl_common.h"
9 #include "atl_hw_regs.h"
11 #include "hw_atl/hw_atl_llh.h"
12 #include "hw_atl/hw_atl_b0.h"
13 #include "hw_atl/hw_atl_b0_internal.h"
15 static int eth_atl_dev_init(struct rte_eth_dev *eth_dev);
16 static int eth_atl_dev_uninit(struct rte_eth_dev *eth_dev);
18 static int atl_dev_configure(struct rte_eth_dev *dev);
19 static int atl_dev_start(struct rte_eth_dev *dev);
20 static void atl_dev_stop(struct rte_eth_dev *dev);
21 static int atl_dev_set_link_up(struct rte_eth_dev *dev);
22 static int atl_dev_set_link_down(struct rte_eth_dev *dev);
23 static void atl_dev_close(struct rte_eth_dev *dev);
24 static int atl_dev_reset(struct rte_eth_dev *dev);
25 static void atl_dev_promiscuous_enable(struct rte_eth_dev *dev);
26 static void atl_dev_promiscuous_disable(struct rte_eth_dev *dev);
27 static void atl_dev_allmulticast_enable(struct rte_eth_dev *dev);
28 static void atl_dev_allmulticast_disable(struct rte_eth_dev *dev);
29 static int atl_dev_link_update(struct rte_eth_dev *dev, int wait);
31 static int atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
32 struct rte_eth_xstat_name *xstats_names,
35 static int atl_dev_stats_get(struct rte_eth_dev *dev,
36 struct rte_eth_stats *stats);
38 static int atl_dev_xstats_get(struct rte_eth_dev *dev,
39 struct rte_eth_xstat *stats, unsigned int n);
41 static void atl_dev_stats_reset(struct rte_eth_dev *dev);
43 static int atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
46 static void atl_dev_info_get(struct rte_eth_dev *dev,
47 struct rte_eth_dev_info *dev_info);
49 static const uint32_t *atl_dev_supported_ptypes_get(struct rte_eth_dev *dev);
51 static int atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
54 static int atl_vlan_filter_set(struct rte_eth_dev *dev,
55 uint16_t vlan_id, int on);
57 static int atl_vlan_offload_set(struct rte_eth_dev *dev, int mask);
59 static void atl_vlan_strip_queue_set(struct rte_eth_dev *dev,
60 uint16_t queue_id, int on);
62 static int atl_vlan_tpid_set(struct rte_eth_dev *dev,
63 enum rte_vlan_type vlan_type, uint16_t tpid);
66 static int atl_dev_get_eeprom_length(struct rte_eth_dev *dev);
67 static int atl_dev_get_eeprom(struct rte_eth_dev *dev,
68 struct rte_dev_eeprom_info *eeprom);
69 static int atl_dev_set_eeprom(struct rte_eth_dev *dev,
70 struct rte_dev_eeprom_info *eeprom);
73 static int atl_dev_get_regs(struct rte_eth_dev *dev,
74 struct rte_dev_reg_info *regs);
77 static int atl_flow_ctrl_get(struct rte_eth_dev *dev,
78 struct rte_eth_fc_conf *fc_conf);
79 static int atl_flow_ctrl_set(struct rte_eth_dev *dev,
80 struct rte_eth_fc_conf *fc_conf);
82 static void atl_dev_link_status_print(struct rte_eth_dev *dev);
85 static int atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
86 static int atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
87 static int atl_dev_interrupt_get_status(struct rte_eth_dev *dev);
88 static int atl_dev_interrupt_action(struct rte_eth_dev *dev,
89 struct rte_intr_handle *handle);
90 static void atl_dev_interrupt_handler(void *param);
93 static int atl_add_mac_addr(struct rte_eth_dev *dev,
94 struct ether_addr *mac_addr,
95 uint32_t index, uint32_t pool);
96 static void atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index);
97 static int atl_set_default_mac_addr(struct rte_eth_dev *dev,
98 struct ether_addr *mac_addr);
100 static int atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
101 struct ether_addr *mc_addr_set,
102 uint32_t nb_mc_addr);
105 static int atl_reta_update(struct rte_eth_dev *dev,
106 struct rte_eth_rss_reta_entry64 *reta_conf,
108 static int atl_reta_query(struct rte_eth_dev *dev,
109 struct rte_eth_rss_reta_entry64 *reta_conf,
111 static int atl_rss_hash_update(struct rte_eth_dev *dev,
112 struct rte_eth_rss_conf *rss_conf);
113 static int atl_rss_hash_conf_get(struct rte_eth_dev *dev,
114 struct rte_eth_rss_conf *rss_conf);
117 static int eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
118 struct rte_pci_device *pci_dev);
119 static int eth_atl_pci_remove(struct rte_pci_device *pci_dev);
121 static void atl_dev_info_get(struct rte_eth_dev *dev,
122 struct rte_eth_dev_info *dev_info);
/* Dynamic log type handles for init- and driver-level logging.
 * NOTE(review): presumably registered via RTE_INIT elsewhere in this
 * file — the registration code is not visible in this chunk.
 */
124 int atl_logtype_init;
125 int atl_logtype_driver;
128 * The set of PCI devices this driver supports
130 static const struct rte_pci_id pci_id_atl_map[] = {
/* Legacy / engineering-sample device IDs */
131 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_0001) },
132 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D100) },
133 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D107) },
134 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D108) },
135 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_D109) },
/* AQC1xx production parts */
137 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100) },
138 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107) },
139 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108) },
140 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109) },
141 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111) },
142 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112) },
/* "S" variants */
144 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC100S) },
145 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC107S) },
146 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC108S) },
147 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC109S) },
148 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111S) },
149 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112S) },
/* "E" variants */
151 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC111E) },
152 { RTE_PCI_DEVICE(PCI_VENDOR_ID_AQUANTIA, AQ_DEVICE_ID_AQC112E) },
153 { .vendor_id = 0, /* sentinel */ },
/* PCI driver registration: needs BAR mapping, supports link-status-change
 * interrupts, and can run with IOVA-as-VA.
 */
156 static struct rte_pci_driver rte_atl_pmd = {
157 .id_table = pci_id_atl_map,
158 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
159 RTE_PCI_DRV_IOVA_AS_VA,
160 .probe = eth_atl_pci_probe,
161 .remove = eth_atl_pci_remove,
/* Rx offloads advertised in dev_info: VLAN strip, L3/L4 checksum, jumbo. */
164 #define ATL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_STRIP \
165 | DEV_RX_OFFLOAD_IPV4_CKSUM \
166 | DEV_RX_OFFLOAD_UDP_CKSUM \
167 | DEV_RX_OFFLOAD_TCP_CKSUM \
168 | DEV_RX_OFFLOAD_JUMBO_FRAME)
/* Tx offloads advertised in dev_info: VLAN insert, checksums, TSO,
 * multi-segment packets.
 */
170 #define ATL_TX_OFFLOADS (DEV_TX_OFFLOAD_VLAN_INSERT \
171 | DEV_TX_OFFLOAD_IPV4_CKSUM \
172 | DEV_TX_OFFLOAD_UDP_CKSUM \
173 | DEV_TX_OFFLOAD_TCP_CKSUM \
174 | DEV_TX_OFFLOAD_TCP_TSO \
175 | DEV_TX_OFFLOAD_MULTI_SEGS)
/* Rx ring size limits reported to applications via dev_info. */
177 static const struct rte_eth_desc_lim rx_desc_lim = {
178 .nb_max = ATL_MAX_RING_DESC,
179 .nb_min = ATL_MIN_RING_DESC,
180 .nb_align = ATL_RXD_ALIGN,
/* Tx ring size limits, including per-packet segment caps for TSO. */
183 static const struct rte_eth_desc_lim tx_desc_lim = {
184 .nb_max = ATL_MAX_RING_DESC,
185 .nb_min = ATL_MIN_RING_DESC,
186 .nb_align = ATL_TXD_ALIGN,
187 .nb_seg_max = ATL_TX_MAX_SEG,
188 .nb_mtu_seg_max = ATL_TX_MAX_SEG,
/* Maps an xstat name to its byte offset inside struct aq_stats_s,
 * so xstats can be read generically from hw->curr_stats.
 */
191 #define ATL_XSTATS_FIELD(name) { \
193 offsetof(struct aq_stats_s, name) \
196 struct atl_xstats_tbl_s {
/* Hardware counters exposed as extended stats: unicast/multicast/broadcast
 * packet (u/m/b + p) and byte (u/m/b + b) counters for rx (rc) and tx (tc),
 * plus error counters (erpt/erpr).
 */
201 static struct atl_xstats_tbl_s atl_xstats_tbl[] = {
202 ATL_XSTATS_FIELD(uprc),
203 ATL_XSTATS_FIELD(mprc),
204 ATL_XSTATS_FIELD(bprc),
205 ATL_XSTATS_FIELD(erpt),
206 ATL_XSTATS_FIELD(uptc),
207 ATL_XSTATS_FIELD(mptc),
208 ATL_XSTATS_FIELD(bptc),
209 ATL_XSTATS_FIELD(erpr),
210 ATL_XSTATS_FIELD(ubrc),
211 ATL_XSTATS_FIELD(ubtc),
212 ATL_XSTATS_FIELD(mbrc),
213 ATL_XSTATS_FIELD(mbtc),
214 ATL_XSTATS_FIELD(bbrc),
215 ATL_XSTATS_FIELD(bbtc),
/* ethdev operation table wiring the PMD callbacks into rte_ethdev.
 * Note: xstats_reset deliberately reuses atl_dev_stats_reset.
 */
218 static const struct eth_dev_ops atl_eth_dev_ops = {
219 .dev_configure = atl_dev_configure,
220 .dev_start = atl_dev_start,
221 .dev_stop = atl_dev_stop,
222 .dev_set_link_up = atl_dev_set_link_up,
223 .dev_set_link_down = atl_dev_set_link_down,
224 .dev_close = atl_dev_close,
225 .dev_reset = atl_dev_reset,
228 .promiscuous_enable = atl_dev_promiscuous_enable,
229 .promiscuous_disable = atl_dev_promiscuous_disable,
230 .allmulticast_enable = atl_dev_allmulticast_enable,
231 .allmulticast_disable = atl_dev_allmulticast_disable,
234 .link_update = atl_dev_link_update,
236 .get_reg = atl_dev_get_regs,
239 .stats_get = atl_dev_stats_get,
240 .xstats_get = atl_dev_xstats_get,
241 .xstats_get_names = atl_dev_xstats_get_names,
242 .stats_reset = atl_dev_stats_reset,
243 .xstats_reset = atl_dev_stats_reset,
245 .fw_version_get = atl_fw_version_get,
246 .dev_infos_get = atl_dev_info_get,
247 .dev_supported_ptypes_get = atl_dev_supported_ptypes_get,
249 .mtu_set = atl_dev_mtu_set,
252 .vlan_filter_set = atl_vlan_filter_set,
253 .vlan_offload_set = atl_vlan_offload_set,
254 .vlan_tpid_set = atl_vlan_tpid_set,
255 .vlan_strip_queue_set = atl_vlan_strip_queue_set,
258 .rx_queue_start = atl_rx_queue_start,
259 .rx_queue_stop = atl_rx_queue_stop,
260 .rx_queue_setup = atl_rx_queue_setup,
261 .rx_queue_release = atl_rx_queue_release,
263 .tx_queue_start = atl_tx_queue_start,
264 .tx_queue_stop = atl_tx_queue_stop,
265 .tx_queue_setup = atl_tx_queue_setup,
266 .tx_queue_release = atl_tx_queue_release,
268 .rx_queue_intr_enable = atl_dev_rx_queue_intr_enable,
269 .rx_queue_intr_disable = atl_dev_rx_queue_intr_disable,
271 .rx_queue_count = atl_rx_queue_count,
272 .rx_descriptor_status = atl_dev_rx_descriptor_status,
273 .tx_descriptor_status = atl_dev_tx_descriptor_status,
276 .get_eeprom_length = atl_dev_get_eeprom_length,
277 .get_eeprom = atl_dev_get_eeprom,
278 .set_eeprom = atl_dev_set_eeprom,
281 .flow_ctrl_get = atl_flow_ctrl_get,
282 .flow_ctrl_set = atl_flow_ctrl_set,
285 .mac_addr_add = atl_add_mac_addr,
286 .mac_addr_remove = atl_remove_mac_addr,
287 .mac_addr_set = atl_set_default_mac_addr,
288 .set_mc_addr_list = atl_dev_set_mc_addr_list,
289 .rxq_info_get = atl_rxq_info_get,
290 .txq_info_get = atl_txq_info_get,
292 .reta_update = atl_reta_update,
293 .reta_query = atl_reta_query,
294 .rss_hash_update = atl_rss_hash_update,
295 .rss_hash_conf_get = atl_rss_hash_conf_get,
/* Perform a full NIC reset through the B0 hardware layer.
 * Returns the status code from hw_atl_b0_hw_reset().
 */
static inline int32_t
atl_reset_hw(struct aq_hw_s *hw)
{
	return hw_atl_b0_hw_reset(hw);
}
305 atl_enable_intr(struct rte_eth_dev *dev)
307 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
309 hw_atl_itr_irq_msk_setlsw_set(hw, 0xffffffff);
/* Mask (disable) all interrupt sources in the lower status word. */
static void
atl_disable_intr(struct aq_hw_s *hw)
{
	PMD_INIT_FUNC_TRACE();
	hw_atl_itr_irq_msk_clearlsw_set(hw, 0xffffffff);
}
/* Per-port init: wire up ops/burst functions, hard-code the HW config,
 * load FW ops, read the permanent MAC and enable interrupts.
 * NOTE(review): this listing is lossy — the error-return lines (after the
 * MAC alloc, initfw and get_mac_permanent checks) and the final return are
 * not visible here; verify against the full source.
 */
320 eth_atl_dev_init(struct rte_eth_dev *eth_dev)
322 struct atl_adapter *adapter =
323 (struct atl_adapter *)eth_dev->data->dev_private;
324 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
325 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
326 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
329 PMD_INIT_FUNC_TRACE();
331 eth_dev->dev_ops = &atl_eth_dev_ops;
332 eth_dev->rx_pkt_burst = &atl_recv_pkts;
333 eth_dev->tx_pkt_burst = &atl_xmit_pkts;
334 eth_dev->tx_pkt_prepare = &atl_prep_pkts;
336 /* For secondary processes, the primary process has done all the work */
337 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
340 /* Vendor and Device ID need to be set before init of shared code */
341 hw->device_id = pci_dev->id.device_id;
342 hw->vendor_id = pci_dev->id.vendor_id;
343 hw->mmio = (void *)pci_dev->mem_resource[0].addr;
345 /* Hardware configuration - hardcode */
346 adapter->hw_cfg.is_lro = false;
347 adapter->hw_cfg.wol = false;
348 adapter->hw_cfg.is_rss = false;
349 adapter->hw_cfg.num_rss_queues = HW_ATL_B0_RSS_MAX;
/* Speed mask starts from 10G; remaining rate bits are on lines not
 * visible in this listing.
 */
351 adapter->hw_cfg.link_speed_msk = AQ_NIC_RATE_10G |
357 adapter->hw_cfg.flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
358 adapter->hw_cfg.aq_rss.indirection_table_size =
359 HW_ATL_B0_RSS_REDIRECTION_MAX;
361 hw->aq_nic_cfg = &adapter->hw_cfg;
363 /* disable interrupt */
364 atl_disable_intr(hw);
366 /* Allocate memory for storing MAC addresses */
367 eth_dev->data->mac_addrs = rte_zmalloc("atlantic", ETHER_ADDR_LEN, 0);
368 if (eth_dev->data->mac_addrs == NULL) {
369 PMD_INIT_LOG(ERR, "MAC Malloc failed");
/* Bind the firmware operations table for this chip revision. */
373 err = hw_atl_utils_initfw(hw, &hw->aq_fw_ops);
377 /* Copy the permanent MAC address */
378 if (hw->aq_fw_ops->get_mac_permanent(hw,
379 eth_dev->data->mac_addrs->addr_bytes) != 0)
382 /* Reset the hw statistics */
383 atl_dev_stats_reset(eth_dev);
385 rte_intr_callback_register(intr_handle,
386 atl_dev_interrupt_handler, eth_dev);
388 /* enable uio/vfio intr/eventfd mapping */
389 rte_intr_enable(intr_handle);
391 /* enable support intr */
392 atl_enable_intr(eth_dev);
/* Per-port teardown: close the device if still running, drop the burst
 * callbacks, unregister the interrupt handler and free the MAC table.
 * Primary process only; secondaries return early.
 */
398 eth_atl_dev_uninit(struct rte_eth_dev *eth_dev)
400 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
401 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
404 PMD_INIT_FUNC_TRACE();
406 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
409 hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
/* Only close if dev_stop has not already run. */
411 if (hw->adapter_stopped == 0)
412 atl_dev_close(eth_dev);
414 eth_dev->dev_ops = NULL;
415 eth_dev->rx_pkt_burst = NULL;
416 eth_dev->tx_pkt_burst = NULL;
418 /* disable uio intr before callback unregister */
419 rte_intr_disable(intr_handle);
420 rte_intr_callback_unregister(intr_handle,
421 atl_dev_interrupt_handler, eth_dev);
423 rte_free(eth_dev->data->mac_addrs);
424 eth_dev->data->mac_addrs = NULL;
430 eth_atl_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
431 struct rte_pci_device *pci_dev)
433 return rte_eth_dev_pci_generic_probe(pci_dev,
434 sizeof(struct atl_adapter), eth_atl_dev_init);
438 eth_atl_pci_remove(struct rte_pci_device *pci_dev)
440 return rte_eth_dev_pci_generic_remove(pci_dev, eth_atl_dev_uninit);
444 atl_dev_configure(struct rte_eth_dev *dev)
446 struct atl_interrupt *intr =
447 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
449 PMD_INIT_FUNC_TRACE();
451 /* set flag to update link status after init */
452 intr->flags |= ATL_FLAG_NEED_LINK_UPDATE;
458 * Configure device link speed and setup link.
459 * It returns 0 on success.
/* dev_start: reset and re-init the hardware, set up the Rx interrupt
 * vector mapping, start the queues, program the requested link speed
 * mask and re-enable interrupts.
 * NOTE(review): lossy listing — error-return lines, the `error` cleanup
 * label and several declarations (err/status/speed) are not visible here.
 */
462 atl_dev_start(struct rte_eth_dev *dev)
464 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
465 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
466 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
467 uint32_t intr_vector = 0;
468 uint32_t *link_speeds;
473 PMD_INIT_FUNC_TRACE();
475 /* set adapter started */
476 hw->adapter_stopped = 0;
/* Fixed-speed (no autoneg) configuration is not supported. */
478 if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
480 "Invalid link_speeds for port %u, fix speed not supported",
485 /* disable uio/vfio intr/eventfd mapping */
486 rte_intr_disable(intr_handle);
488 /* reinitialize adapter
489 * this calls reset and start
491 status = atl_reset_hw(hw);
495 err = hw_atl_b0_hw_init(hw, dev->data->mac_addrs->addr_bytes);
497 hw_atl_b0_hw_start(hw);
498 /* check and configure queue intr-vector mapping */
499 if ((rte_intr_cap_multiple(intr_handle) ||
500 !RTE_ETH_DEV_SRIOV(dev).active) &&
501 dev->data->dev_conf.intr_conf.rxq != 0) {
502 intr_vector = dev->data->nb_rx_queues;
503 if (intr_vector > ATL_MAX_INTR_QUEUE_NUM) {
504 PMD_INIT_LOG(ERR, "At most %d intr queues supported",
505 ATL_MAX_INTR_QUEUE_NUM);
508 if (rte_intr_efd_enable(intr_handle, intr_vector)) {
509 PMD_INIT_LOG(ERR, "rte_intr_efd_enable failed");
/* Allocate the per-Rx-queue interrupt vector table on demand. */
514 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
515 intr_handle->intr_vec = rte_zmalloc("intr_vec",
516 dev->data->nb_rx_queues * sizeof(int), 0);
517 if (intr_handle->intr_vec == NULL) {
518 PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
519 " intr_vec", dev->data->nb_rx_queues);
524 /* initialize transmission unit */
527 /* This can fail when allocating mbufs for descriptor rings */
528 err = atl_rx_init(dev);
530 PMD_INIT_LOG(ERR, "Unable to initialize RX hardware");
534 PMD_INIT_LOG(DEBUG, "FW version: %u.%u.%u",
535 hw->fw_ver_actual >> 24,
536 (hw->fw_ver_actual >> 16) & 0xFF,
537 hw->fw_ver_actual & 0xFFFF);
538 PMD_INIT_LOG(DEBUG, "Driver version: %s", ATL_PMD_DRIVER_VERSION);
540 err = atl_start_queues(dev);
542 PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
546 err = hw->aq_fw_ops->update_link_status(hw);
551 dev->data->dev_link.link_status = hw->aq_link_status.mbps != 0;
553 link_speeds = &dev->data->dev_conf.link_speeds;
/* Translate ethdev link_speeds request into the AQ rate mask;
 * autoneg keeps the full configured speed mask.
 */
557 if (*link_speeds == ETH_LINK_SPEED_AUTONEG) {
558 speed = hw->aq_nic_cfg->link_speed_msk;
560 if (*link_speeds & ETH_LINK_SPEED_10G)
561 speed |= AQ_NIC_RATE_10G;
562 if (*link_speeds & ETH_LINK_SPEED_5G)
563 speed |= AQ_NIC_RATE_5G;
564 if (*link_speeds & ETH_LINK_SPEED_1G)
565 speed |= AQ_NIC_RATE_1G;
566 if (*link_speeds & ETH_LINK_SPEED_2_5G)
567 speed |= AQ_NIC_RATE_2G5;
568 if (*link_speeds & ETH_LINK_SPEED_100M)
569 speed |= AQ_NIC_RATE_100M;
572 err = hw->aq_fw_ops->set_link_speed(hw, speed);
/* LSC interrupts require the multiplexed (non-INTx-shared) handler. */
576 if (rte_intr_allow_others(intr_handle)) {
577 /* check if lsc interrupt is enabled */
578 if (dev->data->dev_conf.intr_conf.lsc != 0)
579 atl_dev_lsc_interrupt_setup(dev, true);
581 atl_dev_lsc_interrupt_setup(dev, false);
583 rte_intr_callback_unregister(intr_handle,
584 atl_dev_interrupt_handler, dev);
585 if (dev->data->dev_conf.intr_conf.lsc != 0)
586 PMD_INIT_LOG(INFO, "lsc won't enable because of"
587 " no intr multiplex");
590 /* check if rxq interrupt is enabled */
591 if (dev->data->dev_conf.intr_conf.rxq != 0 &&
592 rte_intr_dp_is_en(intr_handle))
593 atl_dev_rxq_interrupt_setup(dev);
595 /* enable uio/vfio intr/eventfd mapping */
596 rte_intr_enable(intr_handle);
598 /* resume enabled intr since hw reset */
599 atl_enable_intr(dev);
/* error path: stop queues before returning (label not visible here) */
604 atl_stop_queues(dev);
609 * Stop device: disable rx and tx functions to allow for reconfiguring.
/* dev_stop: mask interrupts, stop the queues, clear the recorded link
 * status and release the Rx interrupt vector table.
 * NOTE(review): the `struct aq_hw_s *hw =` declaration line is missing
 * from this lossy listing (only the macro call on line 616 is visible).
 */
612 atl_dev_stop(struct rte_eth_dev *dev)
614 struct rte_eth_link link;
616 ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
617 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
618 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
620 PMD_INIT_FUNC_TRACE();
622 /* disable interrupts */
623 atl_disable_intr(hw);
/* Mark stopped so eth_atl_dev_uninit() skips the close path. */
627 hw->adapter_stopped = 1;
629 atl_stop_queues(dev);
631 /* Clear stored conf */
632 dev->data->scattered_rx = 0;
635 /* Clear recorded link status */
636 memset(&link, 0, sizeof(link));
637 rte_eth_linkstatus_set(dev, &link);
639 if (!rte_intr_allow_others(intr_handle))
640 /* resume to the default handler */
641 rte_intr_callback_register(intr_handle,
642 atl_dev_interrupt_handler,
645 /* Clean datapath event and queue/vec mapping */
646 rte_intr_efd_disable(intr_handle);
647 if (intr_handle->intr_vec != NULL) {
648 rte_free(intr_handle->intr_vec);
649 intr_handle->intr_vec = NULL;
654 * Set device link up: enable tx.
657 atl_dev_set_link_up(struct rte_eth_dev *dev)
659 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
661 return hw->aq_fw_ops->set_link_speed(hw,
662 hw->aq_nic_cfg->link_speed_msk);
666 * Set device link down: disable tx.
669 atl_dev_set_link_down(struct rte_eth_dev *dev)
671 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
673 return hw->aq_fw_ops->set_link_speed(hw, 0);
677 * Reset and stop device.
/* dev_close: free the queue resources.
 * NOTE(review): lossy listing — an atl_dev_stop() call between the trace
 * and the queue free (line 684) is likely missing here; confirm against
 * the full source.
 */
680 atl_dev_close(struct rte_eth_dev *dev)
682 PMD_INIT_FUNC_TRACE();
686 atl_free_queues(dev);
/* dev_reset: full uninit followed by re-init of the port.
 * NOTE(review): the early-return-on-error and final return lines are not
 * visible in this lossy listing.
 */
690 atl_dev_reset(struct rte_eth_dev *dev)
694 ret = eth_atl_dev_uninit(dev);
698 ret = eth_atl_dev_init(dev);
/* stats_get: refresh firmware counters, then fill rte_eth_stats from the
 * hardware DMA counters plus the software-maintained per-queue counters.
 */
705 atl_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
707 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
708 struct aq_hw_s *hw = &adapter->hw;
709 struct atl_sw_stats *swstats = &adapter->sw_stats;
/* Pull fresh counters from firmware into hw->curr_stats. */
712 hw->aq_fw_ops->update_stats(hw);
714 /* Fill out the rte_eth_stats statistics structure */
715 stats->ipackets = hw->curr_stats.dma_pkt_rc;
716 stats->ibytes = hw->curr_stats.dma_oct_rc;
717 stats->imissed = hw->curr_stats.dpc;
718 stats->ierrors = hw->curr_stats.erpt;
720 stats->opackets = hw->curr_stats.dma_pkt_tc;
721 stats->obytes = hw->curr_stats.dma_oct_tc;
724 stats->rx_nombuf = swstats->rx_nombuf;
/* Per-queue counters are tracked in software by the rx/tx burst code. */
726 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
727 stats->q_ipackets[i] = swstats->q_ipackets[i];
728 stats->q_opackets[i] = swstats->q_opackets[i];
729 stats->q_ibytes[i] = swstats->q_ibytes[i];
730 stats->q_obytes[i] = swstats->q_obytes[i];
731 stats->q_errors[i] = swstats->q_errors[i];
/* stats_reset (also used for xstats_reset): sync counters from firmware,
 * then zero both the hardware-counter snapshot and the software totals.
 */
737 atl_dev_stats_reset(struct rte_eth_dev *dev)
739 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
740 struct aq_hw_s *hw = &adapter->hw;
742 hw->aq_fw_ops->update_stats(hw);
744 /* Reset software totals */
745 memset(&hw->curr_stats, 0, sizeof(hw->curr_stats));
747 memset(&adapter->sw_stats, 0, sizeof(adapter->sw_stats));
/* xstats_get_names: copy names out of atl_xstats_tbl; with a NULL output
 * array just report the table size (early return on the hidden line
 * before 758).
 */
751 atl_dev_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
752 struct rte_eth_xstat_name *xstats_names,
758 return RTE_DIM(atl_xstats_tbl);
760 for (i = 0; i < size && i < RTE_DIM(atl_xstats_tbl); i++)
761 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
762 atl_xstats_tbl[i].name);
/* xstats_get: read each counter out of hw->curr_stats using the byte
 * offsets recorded in atl_xstats_tbl.
 */
768 atl_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats,
771 struct atl_adapter *adapter = ATL_DEV_TO_ADAPTER(dev);
772 struct aq_hw_s *hw = &adapter->hw;
778 for (i = 0; i < n && i < RTE_DIM(atl_xstats_tbl); i++) {
/* Offset-based access into the aq_stats_s snapshot. */
780 stats[i].value = *(u64 *)((uint8_t *)&hw->curr_stats +
781 atl_xstats_tbl[i].offset);
/* fw_version_get: format "major.minor.build" from the 32-bit firmware
 * version word (major in bits 31:24, minor 23:16, build 15:0).
 */
788 atl_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
790 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
792 unsigned int ret = 0;
794 ret = hw_atl_utils_get_fw_version(hw, &fw_ver);
798 ret = snprintf(fw_version, fw_size, "%u.%u.%u", fw_ver >> 24,
799 (fw_ver >> 16) & 0xFFU, fw_ver & 0xFFFFU);
/* Per ethdev convention, report required buffer size incl. the NUL. */
801 ret += 1; /* add string null-terminator */
/* dev_infos_get: report static device capabilities — queue/MAC limits,
 * offloads, descriptor limits, RSS parameters and supported speeds.
 */
810 atl_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
812 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
814 dev_info->max_rx_queues = AQ_HW_MAX_RX_QUEUES;
815 dev_info->max_tx_queues = AQ_HW_MAX_TX_QUEUES;
817 dev_info->min_rx_bufsize = 1024;
818 dev_info->max_rx_pktlen = HW_ATL_B0_MTU_JUMBO;
819 dev_info->max_mac_addrs = HW_ATL_B0_MAC_MAX;
820 dev_info->max_vfs = pci_dev->max_vfs;
/* No MAC hashing or VMDq support on this device. */
822 dev_info->max_hash_mac_addrs = 0;
823 dev_info->max_vmdq_pools = 0;
824 dev_info->vmdq_queue_num = 0;
826 dev_info->rx_offload_capa = ATL_RX_OFFLOADS;
828 dev_info->tx_offload_capa = ATL_TX_OFFLOADS;
831 dev_info->default_rxconf = (struct rte_eth_rxconf) {
832 .rx_free_thresh = ATL_DEFAULT_RX_FREE_THRESH,
835 dev_info->default_txconf = (struct rte_eth_txconf) {
836 .tx_free_thresh = ATL_DEFAULT_TX_FREE_THRESH,
839 dev_info->rx_desc_lim = rx_desc_lim;
840 dev_info->tx_desc_lim = tx_desc_lim;
842 dev_info->hash_key_size = HW_ATL_B0_RSS_HASHKEY_BITS / 8;
843 dev_info->reta_size = HW_ATL_B0_RSS_REDIRECTION_MAX;
844 dev_info->flow_type_rss_offloads = ATL_RSS_OFFLOAD_ALL;
/* Supported link rates: 100M, 1G, 2.5G, 5G and 10G. */
846 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G;
847 dev_info->speed_capa |= ETH_LINK_SPEED_100M;
848 dev_info->speed_capa |= ETH_LINK_SPEED_2_5G;
849 dev_info->speed_capa |= ETH_LINK_SPEED_5G;
/* Return the packet-type list parsed by atl_recv_pkts, or NULL for any
 * other rx burst function. (Remaining ptypes entries and the NULL return
 * are on lines not visible in this lossy listing.)
 */
852 static const uint32_t *
853 atl_dev_supported_ptypes_get(struct rte_eth_dev *dev)
855 static const uint32_t ptypes[] = {
857 RTE_PTYPE_L2_ETHER_ARP,
858 RTE_PTYPE_L2_ETHER_VLAN,
868 if (dev->rx_pkt_burst == atl_recv_pkts)
874 /* return 0 means link status changed, -1 means not changed */
876 atl_dev_link_update(struct rte_eth_dev *dev, int wait __rte_unused)
878 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
879 struct atl_interrupt *intr =
880 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
881 struct rte_eth_link link, old;
/* Start from a "down" template; speed field default is on a hidden line. */
884 link.link_status = ETH_LINK_DOWN;
886 link.link_duplex = ETH_LINK_FULL_DUPLEX;
887 link.link_autoneg = hw->is_autoneg ? ETH_LINK_AUTONEG : ETH_LINK_FIXED;
888 memset(&old, 0, sizeof(old));
890 /* load old link status */
891 rte_eth_linkstatus_get(dev, &old);
893 /* read current link status */
894 err = hw->aq_fw_ops->update_link_status(hw);
/* mbps == 0 means no link: publish the "down" template. */
899 if (hw->aq_link_status.mbps == 0) {
900 /* write default (down) link status */
901 rte_eth_linkstatus_set(dev, &link);
902 if (link.link_status == old.link_status)
907 intr->flags &= ~ATL_FLAG_NEED_LINK_CONFIG;
909 link.link_status = ETH_LINK_UP;
910 link.link_duplex = ETH_LINK_FULL_DUPLEX;
911 link.link_speed = hw->aq_link_status.mbps;
913 rte_eth_linkstatus_set(dev, &link);
915 if (link.link_status == old.link_status)
922 atl_dev_promiscuous_enable(struct rte_eth_dev *dev)
924 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
926 hw_atl_rpfl2promiscuous_mode_en_set(hw, true);
930 atl_dev_promiscuous_disable(struct rte_eth_dev *dev)
932 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
934 hw_atl_rpfl2promiscuous_mode_en_set(hw, false);
938 atl_dev_allmulticast_enable(struct rte_eth_dev *dev)
940 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
942 hw_atl_rpfl2_accept_all_mc_packets_set(hw, true);
946 atl_dev_allmulticast_disable(struct rte_eth_dev *dev)
948 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
950 if (dev->data->promiscuous == 1)
951 return; /* must remain in all_multicast mode */
953 hw_atl_rpfl2_accept_all_mc_packets_set(hw, false);
957 * It clears the interrupt causes and enables the interrupt.
958 * It will be called once only during nic initialized.
961 * Pointer to struct rte_eth_dev.
966 * - On success, zero.
967 * - On failure, a negative value.
/* LSC interrupt setup: the hardware link interrupt is always armed by
 * atl_enable_intr(); this hook only logs the current link state.
 * (The `return 0;` lines are not visible in this lossy listing.)
 */
971 atl_dev_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on __rte_unused)
973 atl_dev_link_status_print(dev);
/* Rx-queue interrupt setup: nothing to program here. */
978 atl_dev_rxq_interrupt_setup(struct rte_eth_dev *dev __rte_unused)
/* Read and acknowledge the pending interrupt cause; interrupts stay
 * masked until atl_dev_interrupt_action() re-enables them.
 */
985 atl_dev_interrupt_get_status(struct rte_eth_dev *dev)
987 struct atl_interrupt *intr =
988 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
989 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
992 hw_atl_b0_hw_irq_read(hw, &cause);
994 atl_disable_intr(hw);
/* Record only the link-change cause for the deferred handler. */
995 intr->flags = cause & BIT(ATL_IRQ_CAUSE_LINK) ?
996 ATL_FLAG_NEED_LINK_UPDATE : 0;
1002 * It gets and then prints the link status.
1005 * Pointer to struct rte_eth_dev.
1008 * - On success, zero.
1009 * - On failure, a negative value.
/* Log the current link status (speed/duplex when up) for this port.
 * The PCI-address debug print below appears to sit in a conditional or
 * commented region whose delimiters are not visible in this listing.
 */
1012 atl_dev_link_status_print(struct rte_eth_dev *dev)
1014 struct rte_eth_link link;
1016 memset(&link, 0, sizeof(link));
1017 rte_eth_linkstatus_get(dev, &link);
1018 if (link.link_status) {
1019 PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
1020 (int)(dev->data->port_id),
1021 (unsigned int)link.link_speed,
1022 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
1023 "full-duplex" : "half-duplex");
1025 PMD_DRV_LOG(INFO, " Port %d: Link Down",
1026 (int)(dev->data->port_id));
1032 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1034 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
1035 pci_dev->addr.domain,
1037 pci_dev->addr.devid,
1038 pci_dev->addr.function);
1042 PMD_DRV_LOG(INFO, "Link speed:%d", link.link_speed);
1046 * It executes link_update after knowing an interrupt occurred.
1049 * Pointer to struct rte_eth_dev.
1052 * - On success, zero.
1053 * - On failure, a negative value.
/* Deferred interrupt work: refresh link status if the link cause fired,
 * notify registered LSC callbacks, then re-arm interrupts (they were
 * masked in atl_dev_interrupt_get_status()).
 */
1056 atl_dev_interrupt_action(struct rte_eth_dev *dev,
1057 struct rte_intr_handle *intr_handle)
1059 struct atl_interrupt *intr =
1060 ATL_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
1062 if (intr->flags & ATL_FLAG_NEED_LINK_UPDATE) {
1063 atl_dev_link_update(dev, 0);
1064 intr->flags &= ~ATL_FLAG_NEED_LINK_UPDATE;
1065 atl_dev_link_status_print(dev);
1066 _rte_eth_dev_callback_process(dev,
1067 RTE_ETH_EVENT_INTR_LSC, NULL);
1070 atl_enable_intr(dev);
1071 rte_intr_enable(intr_handle);
1077 * Interrupt handler triggered by NIC for handling
1078 * specific interrupt.
1081 * Pointer to interrupt handle.
1083 * The address of parameter (struct rte_eth_dev *) registered before.
1089 atl_dev_interrupt_handler(void *param)
1091 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
/* Latch the cause, then run the deferred link-update/notify logic. */
1093 atl_dev_interrupt_get_status(dev);
1094 atl_dev_interrupt_action(dev, dev->intr_handle);
1097 #define SFP_EEPROM_SIZE 0xff
1100 atl_dev_get_eeprom_length(struct rte_eth_dev *dev __rte_unused)
1102 return SFP_EEPROM_SIZE;
1106 atl_dev_get_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1108 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1110 if (hw->aq_fw_ops->get_eeprom == NULL)
1113 if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1116 return hw->aq_fw_ops->get_eeprom(hw, eeprom->data, eeprom->length);
1120 atl_dev_set_eeprom(struct rte_eth_dev *dev, struct rte_dev_eeprom_info *eeprom)
1122 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1124 if (hw->aq_fw_ops->set_eeprom == NULL)
1127 if (eeprom->length != SFP_EEPROM_SIZE || eeprom->data == NULL)
1130 return hw->aq_fw_ops->set_eeprom(hw, eeprom->data, eeprom->length);
/* get_reg: with a NULL data pointer report the dump size/width; otherwise
 * perform a full register dump (partial dumps are rejected) and stamp the
 * device version from the MIF ID.
 */
1134 atl_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs)
1136 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1140 if (regs->data == NULL) {
1141 regs->length = hw_atl_utils_hw_get_reg_length();
1142 regs->width = sizeof(u32);
1146 /* Only full register dump is supported */
1147 if (regs->length && regs->length != hw_atl_utils_hw_get_reg_length())
1150 err = hw_atl_utils_hw_get_regs(hw, regs->data);
1152 /* Device version */
1153 mif_id = hw_atl_reg_glb_mif_id_get(hw);
1154 regs->version = mif_id & 0xFFU;
1160 atl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1162 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1164 if (hw->aq_nic_cfg->flow_control == AQ_NIC_FC_OFF)
1165 fc_conf->mode = RTE_FC_NONE;
1166 else if (hw->aq_nic_cfg->flow_control & (AQ_NIC_FC_RX | AQ_NIC_FC_TX))
1167 fc_conf->mode = RTE_FC_FULL;
1168 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1169 fc_conf->mode = RTE_FC_RX_PAUSE;
1170 else if (hw->aq_nic_cfg->flow_control & AQ_NIC_FC_RX)
1171 fc_conf->mode = RTE_FC_TX_PAUSE;
/* flow_ctrl_set: map the requested rte_eth_fc_mode onto AQ_NIC_FC_* bits
 * and push the new setting to firmware only when it actually changed.
 */
1177 atl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1179 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1180 uint32_t old_flow_control = hw->aq_nic_cfg->flow_control;
/* Bail out when the firmware doesn't expose flow-control programming. */
1183 if (hw->aq_fw_ops->set_flow_control == NULL)
1186 if (fc_conf->mode == RTE_FC_NONE)
1187 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_OFF;
1188 else if (fc_conf->mode == RTE_FC_RX_PAUSE)
1189 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_RX;
1190 else if (fc_conf->mode == RTE_FC_TX_PAUSE)
1191 hw->aq_nic_cfg->flow_control = AQ_NIC_FC_TX;
1192 else if (fc_conf->mode == RTE_FC_FULL)
1193 hw->aq_nic_cfg->flow_control = (AQ_NIC_FC_RX | AQ_NIC_FC_TX);
1195 if (old_flow_control != hw->aq_nic_cfg->flow_control)
1196 return hw->aq_fw_ops->set_flow_control(hw);
/* Program (or clear) unicast MAC filter slot `index`.
 * The 48-bit address is split into a 16-bit MSW (bytes 0-1) and a 32-bit
 * LSW (bytes 2-5) as the filter registers expect.
 * NOTE(review): lossy listing — the NULL-check on mac_addr and the
 * if/else structure around the enable path are not fully visible here.
 */
1202 atl_update_mac_addr(struct rte_eth_dev *dev, uint32_t index,
1203 u8 *mac_addr, bool enable)
1205 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1206 unsigned int h = 0U;
1207 unsigned int l = 0U;
1211 h = (mac_addr[0] << 8) | (mac_addr[1]);
1212 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1213 (mac_addr[4] << 8) | mac_addr[5];
/* Disable the slot while rewriting it, then re-enable if requested. */
1216 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, index);
1217 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l, index);
1218 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h, index);
1221 hw_atl_rpfl2_uc_flr_en_set(hw, 1U, index);
1223 err = aq_hw_err_from_flags(hw);
1229 atl_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1230 uint32_t index __rte_unused, uint32_t pool __rte_unused)
1232 if (is_zero_ether_addr(mac_addr)) {
1233 PMD_DRV_LOG(ERR, "Invalid Ethernet Address");
1237 return atl_update_mac_addr(dev, index, (u8 *)mac_addr, true);
/* mac_addr_remove callback: disable the unicast filter slot `index`. */
static void
atl_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
{
	atl_update_mac_addr(dev, index, NULL, false);
}
/* mac_addr_set callback: replace the default MAC in filter slot 0. */
static int
atl_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	atl_remove_mac_addr(dev, 0);
	atl_add_mac_addr(dev, addr, 0, 0);
	return 0;
}
1255 atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1257 struct rte_eth_dev_info dev_info;
1258 uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
1260 atl_dev_info_get(dev, &dev_info);
1262 if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
1265 /* update max frame size */
1266 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
/* vlan_filter_set: add or remove `vlan_id` from the hardware VLAN filter
 * table (HW_ATL_B0_MAX_VLAN_IDS slots, cached in cfg->vlan_filter), and
 * keep VLAN promiscuous mode on while the table is empty.
 * NOTE(review): lossy listing — several early-return and brace lines are
 * not visible here.
 */
1272 atl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1274 struct aq_hw_cfg_s *cfg =
1275 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1276 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1280 PMD_INIT_FUNC_TRACE();
/* First pass: find an existing entry for this vlan_id. */
1282 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1283 if (cfg->vlan_filter[i] == vlan_id) {
1285 /* Disable VLAN filter. */
1286 hw_atl_rpf_vlan_flr_en_set(hw, 0U, i);
1288 /* Clear VLAN filter entry */
1289 cfg->vlan_filter[i] = 0;
1295 /* VLAN_ID was not found. So, nothing to delete. */
1296 if (i == HW_ATL_B0_MAX_VLAN_IDS && !on)
1299 /* VLAN_ID already exist, or already removed above. Nothing to do. */
1300 if (i != HW_ATL_B0_MAX_VLAN_IDS)
1303 /* Try to found free VLAN filter to add new VLAN_ID */
1304 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1305 if (cfg->vlan_filter[i] == 0)
1309 if (i == HW_ATL_B0_MAX_VLAN_IDS) {
1310 /* We have no free VLAN filter to add new VLAN_ID*/
/* Program the free slot: record, set action, id, then enable. */
1315 cfg->vlan_filter[i] = vlan_id;
1316 hw_atl_rpf_vlan_flr_act_set(hw, 1U, i);
1317 hw_atl_rpf_vlan_id_flr_set(hw, vlan_id, i);
1318 hw_atl_rpf_vlan_flr_en_set(hw, 1U, i);
1321 /* Enable VLAN promisc mode if vlan_filter empty */
1322 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1323 if (cfg->vlan_filter[i] != 0)
1327 hw_atl_rpf_vlan_prom_mode_en_set(hw, i == HW_ATL_B0_MAX_VLAN_IDS);
1333 atl_enable_vlan_filter(struct rte_eth_dev *dev, int en)
1335 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1336 struct aq_hw_cfg_s *cfg =
1337 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1340 PMD_INIT_FUNC_TRACE();
1342 for (i = 0; i < HW_ATL_B0_MAX_VLAN_IDS; i++) {
1343 if (cfg->vlan_filter[i])
1344 hw_atl_rpf_vlan_flr_en_set(hw, en, i);
1350 atl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1352 struct aq_hw_cfg_s *cfg =
1353 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1354 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1358 PMD_INIT_FUNC_TRACE();
1360 ret = atl_enable_vlan_filter(dev, mask & ETH_VLAN_FILTER_MASK);
1362 cfg->vlan_strip = !!(mask & ETH_VLAN_STRIP_MASK);
1364 for (i = 0; i < dev->data->nb_rx_queues; i++)
1365 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, cfg->vlan_strip, i);
1367 if (mask & ETH_VLAN_EXTEND_MASK)
1374 atl_vlan_tpid_set(struct rte_eth_dev *dev, enum rte_vlan_type vlan_type,
1377 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1380 PMD_INIT_FUNC_TRACE();
1382 switch (vlan_type) {
1383 case ETH_VLAN_TYPE_INNER:
1384 hw_atl_rpf_vlan_inner_etht_set(hw, tpid);
1386 case ETH_VLAN_TYPE_OUTER:
1387 hw_atl_rpf_vlan_outer_etht_set(hw, tpid);
1390 PMD_DRV_LOG(ERR, "Unsupported VLAN type");
1398 atl_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue_id, int on)
1400 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1402 PMD_INIT_FUNC_TRACE();
1404 if (queue_id > dev->data->nb_rx_queues) {
1405 PMD_DRV_LOG(ERR, "Invalid queue id");
1409 hw_atl_rpo_rx_desc_vlan_stripping_set(hw, on, queue_id);
1413 atl_dev_set_mc_addr_list(struct rte_eth_dev *dev,
1414 struct ether_addr *mc_addr_set,
1415 uint32_t nb_mc_addr)
1417 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1420 if (nb_mc_addr > AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN)
1423 /* Update whole uc filters table */
1424 for (i = 0; i < AQ_HW_MULTICAST_ADDRESS_MAX - HW_ATL_B0_MAC_MIN; i++) {
1425 u8 *mac_addr = NULL;
1428 if (i < nb_mc_addr) {
1429 mac_addr = mc_addr_set[i].addr_bytes;
1430 l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1431 (mac_addr[4] << 8) | mac_addr[5];
1432 h = (mac_addr[0] << 8) | mac_addr[1];
1435 hw_atl_rpfl2_uc_flr_en_set(hw, 0U, HW_ATL_B0_MAC_MIN + i);
1436 hw_atl_rpfl2unicast_dest_addresslsw_set(hw, l,
1437 HW_ATL_B0_MAC_MIN + i);
1438 hw_atl_rpfl2unicast_dest_addressmsw_set(hw, h,
1439 HW_ATL_B0_MAC_MIN + i);
1440 hw_atl_rpfl2_uc_flr_en_set(hw, !!mac_addr,
1441 HW_ATL_B0_MAC_MIN + i);
1448 atl_reta_update(struct rte_eth_dev *dev,
1449 struct rte_eth_rss_reta_entry64 *reta_conf,
1453 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1454 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1456 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1457 cf->aq_rss.indirection_table[i] = min(reta_conf->reta[i],
1458 dev->data->nb_rx_queues - 1);
1460 hw_atl_b0_hw_rss_set(hw, &cf->aq_rss);
1465 atl_reta_query(struct rte_eth_dev *dev,
1466 struct rte_eth_rss_reta_entry64 *reta_conf,
1470 struct aq_hw_cfg_s *cf = ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1472 for (i = 0; i < reta_size && i < cf->aq_rss.indirection_table_size; i++)
1473 reta_conf->reta[i] = cf->aq_rss.indirection_table[i];
1474 reta_conf->mask = ~0U;
1479 atl_rss_hash_update(struct rte_eth_dev *dev,
1480 struct rte_eth_rss_conf *rss_conf)
1482 struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1483 struct aq_hw_cfg_s *cfg =
1484 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1485 static u8 def_rss_key[40] = {
1486 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
1487 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
1488 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
1489 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
1490 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
1493 cfg->is_rss = !!rss_conf->rss_hf;
1494 if (rss_conf->rss_key) {
1495 memcpy(cfg->aq_rss.hash_secret_key, rss_conf->rss_key,
1496 rss_conf->rss_key_len);
1497 cfg->aq_rss.hash_secret_key_size = rss_conf->rss_key_len;
1499 memcpy(cfg->aq_rss.hash_secret_key, def_rss_key,
1500 sizeof(def_rss_key));
1501 cfg->aq_rss.hash_secret_key_size = sizeof(def_rss_key);
1504 hw_atl_b0_hw_rss_set(hw, &cfg->aq_rss);
1505 hw_atl_b0_hw_rss_hash_set(hw, &cfg->aq_rss);
1510 atl_rss_hash_conf_get(struct rte_eth_dev *dev,
1511 struct rte_eth_rss_conf *rss_conf)
1513 struct aq_hw_cfg_s *cfg =
1514 ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
1516 rss_conf->rss_hf = cfg->is_rss ? ATL_RSS_OFFLOAD_ALL : 0;
1517 if (rss_conf->rss_key) {
1518 rss_conf->rss_key_len = cfg->aq_rss.hash_secret_key_size;
1519 memcpy(rss_conf->rss_key, cfg->aq_rss.hash_secret_key,
1520 rss_conf->rss_key_len);
1526 RTE_PMD_REGISTER_PCI(net_atlantic, rte_atl_pmd);
1527 RTE_PMD_REGISTER_PCI_TABLE(net_atlantic, pci_id_atl_map);
1528 RTE_PMD_REGISTER_KMOD_DEP(net_atlantic, "* igb_uio | uio_pci_generic");
1530 RTE_INIT(atl_init_log)
1532 atl_logtype_init = rte_log_register("pmd.net.atlantic.init");
1533 if (atl_logtype_init >= 0)
1534 rte_log_set_level(atl_logtype_init, RTE_LOG_NOTICE);
1535 atl_logtype_driver = rte_log_register("pmd.net.atlantic.driver");
1536 if (atl_logtype_driver >= 0)
1537 rte_log_set_level(atl_logtype_driver, RTE_LOG_NOTICE);