1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Intel Corporation
8 #include <rte_string_fns.h>
10 #include <rte_bus_pci.h>
11 #include <rte_ethdev_driver.h>
12 #include <rte_ethdev_pci.h>
13 #include <rte_malloc.h>
14 #include <rte_alarm.h>
19 #define IGC_INTEL_VENDOR_ID 0x8086
22 * The overhead from MTU to max frame size.
23 * A VLAN tag is assumed to be present, so it is counted as well.
25 #define IGC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + \
26 RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
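/*
 * Example: with the standard 1500-byte MTU the maximum frame size becomes
 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522 bytes.
 */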
28 #define IGC_FC_PAUSE_TIME 0x0680
29 #define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
30 #define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
32 #define IGC_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
33 #define IGC_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
34 #define IGC_MSIX_OTHER_INTR_VEC 0 /* MSI-X other interrupt vector */
35 #define IGC_FLAG_NEED_LINK_UPDATE (1u << 0) /* need update link */
37 #define IGC_DEFAULT_RX_FREE_THRESH 32
39 #define IGC_DEFAULT_RX_PTHRESH 8
40 #define IGC_DEFAULT_RX_HTHRESH 8
41 #define IGC_DEFAULT_RX_WTHRESH 4
43 #define IGC_DEFAULT_TX_PTHRESH 8
44 #define IGC_DEFAULT_TX_HTHRESH 1
45 #define IGC_DEFAULT_TX_WTHRESH 16
50 /* External VLAN Enable bit mask */
51 #define IGC_CTRL_EXT_EXT_VLAN (1u << 26)
53 /* Per Queue Good Packets Received Count */
54 #define IGC_PQGPRC(idx) (0x10010 + 0x100 * (idx))
55 /* Per Queue Good Octets Received Count */
56 #define IGC_PQGORC(idx) (0x10018 + 0x100 * (idx))
57 /* Per Queue Good Octets Transmitted Count */
58 #define IGC_PQGOTC(idx) (0x10034 + 0x100 * (idx))
59 /* Per Queue Multicast Packets Received Count */
60 #define IGC_PQMPRC(idx) (0x10038 + 0x100 * (idx))
61 /* Transmit Queue Drop Packet Count */
62 #define IGC_TQDPC(idx) (0xe030 + 0x40 * (idx))
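/*
 * Example of the address layout encoded above: IGC_PQGPRC(0) = 0x10010 and
 * IGC_PQGPRC(1) = 0x10110, i.e. the per-queue receive/transmit counters are
 * spaced 0x100 apart, while the TQDPC drop counters are spaced 0x40 apart.
 */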
64 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
65 #define U32_0_IN_U64 0 /* lower bytes of u64 */
66 #define U32_1_IN_U64 1 /* higher bytes of u64 */
68 #define U32_0_IN_U64 1
69 #define U32_1_IN_U64 0
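/*
 * These indices select the low/high 32-bit halves of a u64 viewed as two
 * dwords, independent of host byte order; igc_read_queue_stats_register()
 * below uses dword[U32_0_IN_U64] for the wrapping 32-bit hardware counter and
 * dword[U32_1_IN_U64] for the software-extended upper half.
 */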
72 #define IGC_ALARM_INTERVAL 8000000u
73 /* microseconds; some per-queue registers wrap around to 0 after about 13.6s */
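/*
 * Rough sanity check for the 13.6s figure (assuming the 32-bit per-queue
 * octet counters and the 2.5Gbps top speed of this device): 2^32 bytes at
 * 2.5e9/8 bytes per second is about 13.7 seconds, so an 8-second alarm reads
 * each counter at least once per wrap period.
 */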
75 static const struct rte_eth_desc_lim rx_desc_lim = {
76 .nb_max = IGC_MAX_RXD,
77 .nb_min = IGC_MIN_RXD,
78 .nb_align = IGC_RXD_ALIGN,
81 static const struct rte_eth_desc_lim tx_desc_lim = {
82 .nb_max = IGC_MAX_TXD,
83 .nb_min = IGC_MIN_TXD,
84 .nb_align = IGC_TXD_ALIGN,
85 .nb_seg_max = IGC_TX_MAX_SEG,
86 .nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,
89 static const struct rte_pci_id pci_id_igc_map[] = {
90 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
91 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V) },
92 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I) },
93 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K) },
94 { .vendor_id = 0, /* sentinel */ },
97 /* statistics names and their offsets in the stats structure */
98 struct rte_igc_xstats_name_off {
99 char name[RTE_ETH_XSTATS_NAME_SIZE];
103 static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
104 {"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)},
105 {"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)},
106 {"rx_errors", offsetof(struct igc_hw_stats, rxerrc)},
107 {"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)},
108 {"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)},
109 {"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)},
110 {"tx_excessive_collision_packets", offsetof(struct igc_hw_stats,
112 {"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)},
113 {"tx_total_collisions", offsetof(struct igc_hw_stats, colc)},
114 {"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)},
115 {"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)},
116 {"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)},
117 {"rx_length_errors", offsetof(struct igc_hw_stats, rlec)},
118 {"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)},
119 {"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)},
120 {"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)},
121 {"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)},
122 {"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats,
124 {"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)},
125 {"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)},
126 {"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)},
127 {"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)},
128 {"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
130 {"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
132 {"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)},
133 {"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)},
134 {"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)},
135 {"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)},
136 {"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)},
137 {"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)},
138 {"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)},
139 {"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)},
140 {"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)},
141 {"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)},
142 {"rx_total_packets", offsetof(struct igc_hw_stats, tpr)},
143 {"tx_total_packets", offsetof(struct igc_hw_stats, tpt)},
144 {"rx_total_bytes", offsetof(struct igc_hw_stats, tor)},
145 {"tx_total_bytes", offsetof(struct igc_hw_stats, tot)},
146 {"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)},
147 {"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)},
148 {"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)},
149 {"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)},
150 {"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
152 {"tx_size_1023_to_max_packets", offsetof(struct igc_hw_stats,
154 {"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)},
155 {"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)},
156 {"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)},
157 {"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)},
158 {"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)},
159 {"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)},
160 {"rx_descriptor_lower_threshold",
161 offsetof(struct igc_hw_stats, icrxdmtc)},
164 #define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \
165 sizeof(rte_igc_stats_strings[0]))
167 static int eth_igc_configure(struct rte_eth_dev *dev);
168 static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
169 static void eth_igc_stop(struct rte_eth_dev *dev);
170 static int eth_igc_start(struct rte_eth_dev *dev);
171 static int eth_igc_set_link_up(struct rte_eth_dev *dev);
172 static int eth_igc_set_link_down(struct rte_eth_dev *dev);
173 static void eth_igc_close(struct rte_eth_dev *dev);
174 static int eth_igc_reset(struct rte_eth_dev *dev);
175 static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
176 static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
177 static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
178 char *fw_version, size_t fw_size);
179 static int eth_igc_infos_get(struct rte_eth_dev *dev,
180 struct rte_eth_dev_info *dev_info);
181 static int eth_igc_led_on(struct rte_eth_dev *dev);
182 static int eth_igc_led_off(struct rte_eth_dev *dev);
183 static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
184 static int eth_igc_rar_set(struct rte_eth_dev *dev,
185 struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
186 static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
187 static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
188 struct rte_ether_addr *addr);
189 static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
190 struct rte_ether_addr *mc_addr_set,
191 uint32_t nb_mc_addr);
192 static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);
193 static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);
194 static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
195 static int eth_igc_stats_get(struct rte_eth_dev *dev,
196 struct rte_eth_stats *rte_stats);
197 static int eth_igc_xstats_get(struct rte_eth_dev *dev,
198 struct rte_eth_xstat *xstats, unsigned int n);
199 static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,
201 uint64_t *values, unsigned int n);
202 static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,
203 struct rte_eth_xstat_name *xstats_names,
205 static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
206 struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
208 static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
210 eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
211 uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);
213 eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
215 eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
217 eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
219 eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
221 static const struct eth_dev_ops eth_igc_ops = {
222 .dev_configure = eth_igc_configure,
223 .link_update = eth_igc_link_update,
224 .dev_stop = eth_igc_stop,
225 .dev_start = eth_igc_start,
226 .dev_close = eth_igc_close,
227 .dev_reset = eth_igc_reset,
228 .dev_set_link_up = eth_igc_set_link_up,
229 .dev_set_link_down = eth_igc_set_link_down,
230 .promiscuous_enable = eth_igc_promiscuous_enable,
231 .promiscuous_disable = eth_igc_promiscuous_disable,
232 .allmulticast_enable = eth_igc_allmulticast_enable,
233 .allmulticast_disable = eth_igc_allmulticast_disable,
234 .fw_version_get = eth_igc_fw_version_get,
235 .dev_infos_get = eth_igc_infos_get,
236 .dev_led_on = eth_igc_led_on,
237 .dev_led_off = eth_igc_led_off,
238 .dev_supported_ptypes_get = eth_igc_supported_ptypes_get,
239 .mtu_set = eth_igc_mtu_set,
240 .mac_addr_add = eth_igc_rar_set,
241 .mac_addr_remove = eth_igc_rar_clear,
242 .mac_addr_set = eth_igc_default_mac_addr_set,
243 .set_mc_addr_list = eth_igc_set_mc_addr_list,
245 .rx_queue_setup = eth_igc_rx_queue_setup,
246 .rx_queue_release = eth_igc_rx_queue_release,
247 .rx_queue_count = eth_igc_rx_queue_count,
248 .rx_descriptor_done = eth_igc_rx_descriptor_done,
249 .rx_descriptor_status = eth_igc_rx_descriptor_status,
250 .tx_descriptor_status = eth_igc_tx_descriptor_status,
251 .tx_queue_setup = eth_igc_tx_queue_setup,
252 .tx_queue_release = eth_igc_tx_queue_release,
253 .tx_done_cleanup = eth_igc_tx_done_cleanup,
254 .rxq_info_get = eth_igc_rxq_info_get,
255 .txq_info_get = eth_igc_txq_info_get,
256 .stats_get = eth_igc_stats_get,
257 .xstats_get = eth_igc_xstats_get,
258 .xstats_get_by_id = eth_igc_xstats_get_by_id,
259 .xstats_get_names_by_id = eth_igc_xstats_get_names_by_id,
260 .xstats_get_names = eth_igc_xstats_get_names,
261 .stats_reset = eth_igc_xstats_reset,
262 .xstats_reset = eth_igc_xstats_reset,
263 .queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,
264 .rx_queue_intr_enable = eth_igc_rx_queue_intr_enable,
265 .rx_queue_intr_disable = eth_igc_rx_queue_intr_disable,
266 .flow_ctrl_get = eth_igc_flow_ctrl_get,
267 .flow_ctrl_set = eth_igc_flow_ctrl_set,
271 * multiple queue mode checking
274 igc_check_mq_mode(struct rte_eth_dev *dev)
276 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
277 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
279 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
280 PMD_INIT_LOG(ERR, "SRIOV is not supported.");
284 if (rx_mq_mode != ETH_MQ_RX_NONE &&
285 rx_mq_mode != ETH_MQ_RX_RSS) {
286 /* RSS together with VMDq not supported*/
287 PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
292 /* To avoid breaking software that sets an invalid mode, only display
293 * a warning if an invalid mode is used.
295 if (tx_mq_mode != ETH_MQ_TX_NONE)
296 PMD_INIT_LOG(WARNING,
297 "TX mode %d is not supported. Due to meaningless in this driver, just ignore",
304 eth_igc_configure(struct rte_eth_dev *dev)
306 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
309 PMD_INIT_FUNC_TRACE();
311 ret = igc_check_mq_mode(dev);
315 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
320 eth_igc_set_link_up(struct rte_eth_dev *dev)
322 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
324 if (hw->phy.media_type == igc_media_type_copper)
325 igc_power_up_phy(hw);
327 igc_power_up_fiber_serdes_link(hw);
332 eth_igc_set_link_down(struct rte_eth_dev *dev)
334 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
336 if (hw->phy.media_type == igc_media_type_copper)
337 igc_power_down_phy(hw);
339 igc_shutdown_fiber_serdes_link(hw);
344 * disable other interrupt
347 igc_intr_other_disable(struct rte_eth_dev *dev)
349 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
350 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
351 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
353 if (rte_intr_allow_others(intr_handle) &&
354 dev->data->dev_conf.intr_conf.lsc) {
355 IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
358 IGC_WRITE_REG(hw, IGC_IMC, ~0);
363 * enable other interrupt
366 igc_intr_other_enable(struct rte_eth_dev *dev)
368 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
369 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
370 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
371 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
373 if (rte_intr_allow_others(intr_handle) &&
374 dev->data->dev_conf.intr_conf.lsc) {
375 IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
378 IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
383 * It reads ICR to get the interrupt causes, checks them, and sets a bit flag
384 * to request a link status update.
387 eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
390 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
391 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
393 /* read-on-clear nic registers here */
394 icr = IGC_READ_REG(hw, IGC_ICR);
397 if (icr & IGC_ICR_LSC)
398 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
401 /* return 0 means link status changed, -1 means not changed */
403 eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
405 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
406 struct rte_eth_link link;
407 int link_check, count;
410 hw->mac.get_link_status = 1;
412 /* possible wait-to-complete in up to 9 seconds */
413 for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
414 /* Read the real link status */
415 switch (hw->phy.media_type) {
416 case igc_media_type_copper:
417 /* Do the work to read phy */
418 igc_check_for_link(hw);
419 link_check = !hw->mac.get_link_status;
422 case igc_media_type_fiber:
423 igc_check_for_link(hw);
424 link_check = (IGC_READ_REG(hw, IGC_STATUS) &
428 case igc_media_type_internal_serdes:
429 igc_check_for_link(hw);
430 link_check = hw->mac.serdes_has_link;
436 if (link_check || wait_to_complete == 0)
438 rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
440 memset(&link, 0, sizeof(link));
442 /* Now we check if a transition has happened */
444 uint16_t duplex, speed;
445 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
446 link.link_duplex = (duplex == FULL_DUPLEX) ?
447 ETH_LINK_FULL_DUPLEX :
448 ETH_LINK_HALF_DUPLEX;
449 link.link_speed = speed;
450 link.link_status = ETH_LINK_UP;
451 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
452 ETH_LINK_SPEED_FIXED);
454 if (speed == SPEED_2500) {
455 uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
456 if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
457 tipg &= ~IGC_TIPG_IPGT_MASK;
459 IGC_WRITE_REG(hw, IGC_TIPG, tipg);
464 link.link_duplex = ETH_LINK_HALF_DUPLEX;
465 link.link_status = ETH_LINK_DOWN;
466 link.link_autoneg = ETH_LINK_FIXED;
469 return rte_eth_linkstatus_set(dev, &link);
473 * It executes link_update after knowing an interrupt is present.
476 eth_igc_interrupt_action(struct rte_eth_dev *dev)
478 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
479 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
480 struct rte_eth_link link;
483 if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
484 intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
486 /* set get_link_status to check register later */
487 ret = eth_igc_link_update(dev, 0);
489 /* check if link has changed */
493 rte_eth_linkstatus_get(dev, &link);
494 if (link.link_status)
496 " Port %d: Link Up - speed %u Mbps - %s",
498 (unsigned int)link.link_speed,
499 link.link_duplex == ETH_LINK_FULL_DUPLEX ?
500 "full-duplex" : "half-duplex");
502 PMD_DRV_LOG(INFO, " Port %d: Link Down",
505 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
506 pci_dev->addr.domain,
509 pci_dev->addr.function);
510 _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
516 * Interrupt handler registered at device initialization.
519 * Pointer to interrupt handle.
521 * The address of parameter (struct rte_eth_dev *) registered before.
524 eth_igc_interrupt_handler(void *param)
526 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
528 eth_igc_interrupt_get_status(dev);
529 eth_igc_interrupt_action(dev);
532 static void igc_read_queue_stats_register(struct rte_eth_dev *dev);
535 * Update the queue stats every IGC_ALARM_INTERVAL microseconds.
537 * The address of parameter (struct rte_eth_dev *) registered before.
540 igc_update_queue_stats_handler(void *param)
542 struct rte_eth_dev *dev = param;
543 igc_read_queue_stats_register(dev);
544 rte_eal_alarm_set(IGC_ALARM_INTERVAL,
545 igc_update_queue_stats_handler, dev);
549 * rx,tx enable/disable
552 eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
554 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
557 tctl = IGC_READ_REG(hw, IGC_TCTL);
558 rctl = IGC_READ_REG(hw, IGC_RCTL);
566 tctl &= ~IGC_TCTL_EN;
567 rctl &= ~IGC_RCTL_EN;
569 IGC_WRITE_REG(hw, IGC_TCTL, tctl);
570 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
575 * This routine disables all traffic on the adapter by issuing a
576 * global reset on the MAC.
579 eth_igc_stop(struct rte_eth_dev *dev)
581 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
582 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
583 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
584 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
585 struct rte_eth_link link;
587 adapter->stopped = 1;
589 /* disable receive and transmit */
590 eth_igc_rxtx_control(dev, false);
592 /* disable all MSI-X interrupts */
593 IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
596 /* clear all MSI-X interrupts */
597 IGC_WRITE_REG(hw, IGC_EICR, 0x1f);
599 igc_intr_other_disable(dev);
601 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
603 /* disable intr eventfd mapping */
604 rte_intr_disable(intr_handle);
608 /* disable all wake up */
609 IGC_WRITE_REG(hw, IGC_WUC, 0);
611 /* Set bit for Go Link disconnect */
612 igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
613 IGC_82580_PM_GO_LINKD);
615 /* Power down the phy. Needed to make the link go Down */
616 eth_igc_set_link_down(dev);
618 igc_dev_clear_queues(dev);
620 /* clear the recorded link status */
621 memset(&link, 0, sizeof(link));
622 rte_eth_linkstatus_set(dev, &link);
624 if (!rte_intr_allow_others(intr_handle))
625 /* resume to the default handler */
626 rte_intr_callback_register(intr_handle,
627 eth_igc_interrupt_handler,
630 /* Clean datapath event and queue/vec mapping */
631 rte_intr_efd_disable(intr_handle);
632 if (intr_handle->intr_vec != NULL) {
633 rte_free(intr_handle->intr_vec);
634 intr_handle->intr_vec = NULL;
639 * write interrupt vector allocation register
641 * board private structure
643 * queue index, valid 0,1,2,3
647 * msix-vector, valid 0,1,2,3,4
650 igc_write_ivar(struct igc_hw *hw, uint8_t queue_index,
651 bool tx, uint8_t msix_vector)
654 uint8_t reg_index = queue_index >> 1;
659 * bit31...24 bit23...16 bit15...8 bit7...0
663 * bit31...24 bit23...16 bit15...8 bit7...0
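 *
 * Each IVAR register thus holds four 8-bit entries covering the RX and TX
 * interrupts of two adjacent queues (reg_index = queue_index >> 1); the code
 * below clears and rewrites only the single byte selected by 'offset'.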
673 val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index);
676 val &= ~((uint32_t)0xFF << offset);
678 /* write vector and valid bit */
679 val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset;
681 IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val);
684 /* Sets up the hardware to generate MSI-X interrupts properly
686 * board private structure
689 igc_configure_msix_intr(struct rte_eth_dev *dev)
691 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
692 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
693 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
696 uint32_t vec = IGC_MISC_VEC_ID;
697 uint32_t base = IGC_MISC_VEC_ID;
698 uint32_t misc_shift = 0;
701 /* won't configure msix register if no mapping is done
702 * between intr vector and event fd
704 if (!rte_intr_dp_is_en(intr_handle))
707 if (rte_intr_allow_others(intr_handle)) {
708 base = IGC_RX_VEC_START;
713 /* turn on MSI-X capability first */
714 IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
715 IGC_GPIE_PBA | IGC_GPIE_EIAME |
717 intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
720 if (dev->data->dev_conf.intr_conf.lsc)
721 intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC);
723 /* enable msix auto-clear */
724 igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);
726 /* set other cause interrupt vector */
727 igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
728 (uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);
730 /* enable auto-mask */
731 igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);
733 for (i = 0; i < dev->data->nb_rx_queues; i++) {
734 igc_write_ivar(hw, i, 0, vec);
735 intr_handle->intr_vec[i] = vec;
736 if (vec < base + intr_handle->nb_efd - 1)
744 * It enables the interrupt mask and then enables the interrupt.
747 * Pointer to struct rte_eth_dev.
752 igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
754 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
757 intr->mask |= IGC_ICR_LSC;
759 intr->mask &= ~IGC_ICR_LSC;
763 * It enables the interrupt.
764 * It will be called only once during NIC initialization.
767 igc_rxq_interrupt_setup(struct rte_eth_dev *dev)
770 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
771 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
772 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
773 int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;
775 /* won't configure msix register if no mapping is done
776 * between intr vector and event fd
778 if (!rte_intr_dp_is_en(intr_handle))
781 mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift;
782 IGC_WRITE_REG(hw, IGC_EIMS, mask);
786 * Get hardware rx-buffer size.
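 * The low 6 bits of RXPBS hold the Rx packet buffer size in KB; the shift by
 * 10 below converts that value to bytes.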
789 igc_get_rx_buffer_size(struct igc_hw *hw)
791 return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
795 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
796 * For ASF and Pass Through versions of f/w this means
797 * that the driver is loaded.
800 igc_hw_control_acquire(struct igc_hw *hw)
804 /* Let firmware know the driver has taken over */
805 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
806 IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
810 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
811 * For ASF and Pass Through versions of f/w this means that the
812 * driver is no longer loaded.
815 igc_hw_control_release(struct igc_hw *hw)
819 /* Let firmware take over control of h/w */
820 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
821 IGC_WRITE_REG(hw, IGC_CTRL_EXT,
822 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
826 igc_hardware_init(struct igc_hw *hw)
828 uint32_t rx_buf_size;
831 /* Let the firmware know the OS is in control */
832 igc_hw_control_acquire(hw);
834 /* Issue a global reset */
837 /* disable all wake up */
838 IGC_WRITE_REG(hw, IGC_WUC, 0);
841 * Hardware flow control
842 * - High water mark should allow for at least two standard size (1518)
843 * frames to be received after sending an XOFF.
844 * - Low water mark works best when it is very near the high water mark.
845 * This allows the receiver to restart by sending XON when it has
846 * drained a bit. Here we use an arbitrary value of 1500 which will
847 * restart after one full frame is pulled from the buffer. There
848 * could be several smaller frames in the buffer and if so they will
849 * not trigger the XON until their total number reduces the buffer
852 rx_buf_size = igc_get_rx_buffer_size(hw);
853 hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
854 hw->fc.low_water = hw->fc.high_water - 1500;
855 hw->fc.pause_time = IGC_FC_PAUSE_TIME;
857 hw->fc.requested_mode = igc_fc_full;
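/*
 * Worked example, assuming a 32KB Rx packet buffer reported by the hardware:
 * high_water = 32768 - 2 * 1518 = 29732 bytes,
 * low_water  = 29732 - 1500     = 28232 bytes.
 */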
859 diag = igc_init_hw(hw);
863 igc_get_phy_info(hw);
864 igc_check_for_link(hw);
870 eth_igc_start(struct rte_eth_dev *dev)
872 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
873 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
874 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
875 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
879 PMD_INIT_FUNC_TRACE();
881 /* disable all MSI-X interrupts */
882 IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
885 /* clear all MSI-X interrupts */
886 IGC_WRITE_REG(hw, IGC_EICR, 0x1f);
888 /* disable uio/vfio intr/eventfd mapping */
889 if (!adapter->stopped)
890 rte_intr_disable(intr_handle);
892 /* Power up the phy. Needed to make the link go Up */
893 eth_igc_set_link_up(dev);
895 /* Put the address into the Receive Address Array */
896 igc_rar_set(hw, hw->mac.addr, 0);
898 /* Initialize the hardware */
899 if (igc_hardware_init(hw)) {
900 PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
903 adapter->stopped = 0;
905 /* check and configure queue intr-vector mapping */
906 if (rte_intr_cap_multiple(intr_handle) &&
907 dev->data->dev_conf.intr_conf.rxq) {
908 uint32_t intr_vector = dev->data->nb_rx_queues;
909 if (rte_intr_efd_enable(intr_handle, intr_vector))
913 if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
914 intr_handle->intr_vec = rte_zmalloc("intr_vec",
915 dev->data->nb_rx_queues * sizeof(int), 0);
916 if (intr_handle->intr_vec == NULL) {
918 "Failed to allocate %d rx_queues intr_vec",
919 dev->data->nb_rx_queues);
924 /* configure msix for rx interrupt */
925 igc_configure_msix_intr(dev);
929 /* This can fail when allocating mbufs for descriptor rings */
930 ret = igc_rx_init(dev);
932 PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
933 igc_dev_clear_queues(dev);
937 igc_clear_hw_cntrs_base_generic(hw);
939 /* Setup link speed and duplex */
940 speeds = &dev->data->dev_conf.link_speeds;
941 if (*speeds == ETH_LINK_SPEED_AUTONEG) {
942 hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
946 bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
949 hw->phy.autoneg_advertised = 0;
951 if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
952 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
953 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
954 ETH_LINK_SPEED_FIXED)) {
956 goto error_invalid_config;
958 if (*speeds & ETH_LINK_SPEED_10M_HD) {
959 hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
962 if (*speeds & ETH_LINK_SPEED_10M) {
963 hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
966 if (*speeds & ETH_LINK_SPEED_100M_HD) {
967 hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
970 if (*speeds & ETH_LINK_SPEED_100M) {
971 hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
974 if (*speeds & ETH_LINK_SPEED_1G) {
975 hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
978 if (*speeds & ETH_LINK_SPEED_2_5G) {
979 hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
982 if (num_speeds == 0 || (!autoneg && num_speeds > 1))
983 goto error_invalid_config;
985 /* Set/reset the mac.autoneg based on the link speed,
990 hw->mac.forced_speed_duplex =
991 hw->phy.autoneg_advertised;
999 if (rte_intr_allow_others(intr_handle)) {
1000 /* check if lsc interrupt is enabled */
1001 if (dev->data->dev_conf.intr_conf.lsc)
1002 igc_lsc_interrupt_setup(dev, 1);
1004 igc_lsc_interrupt_setup(dev, 0);
1006 rte_intr_callback_unregister(intr_handle,
1007 eth_igc_interrupt_handler,
1009 if (dev->data->dev_conf.intr_conf.lsc)
1011 "LSC won't enable because of no intr multiplex");
1014 /* enable uio/vfio intr/eventfd mapping */
1015 rte_intr_enable(intr_handle);
1017 rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1018 igc_update_queue_stats_handler, dev);
1020 /* check if rxq interrupt is enabled */
1021 if (dev->data->dev_conf.intr_conf.rxq &&
1022 rte_intr_dp_is_en(intr_handle))
1023 igc_rxq_interrupt_setup(dev);
1025 /* resume enabled intr since hw reset */
1026 igc_intr_other_enable(dev);
1028 eth_igc_rxtx_control(dev, true);
1029 eth_igc_link_update(dev, 0);
1033 error_invalid_config:
1034 PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
1035 dev->data->dev_conf.link_speeds, dev->data->port_id);
1036 igc_dev_clear_queues(dev);
1041 igc_reset_swfw_lock(struct igc_hw *hw)
1046 * Do mac ops initialization manually here, since we will need
1047 * some function pointers set by this call.
1049 ret_val = igc_init_mac_params(hw);
1054 * SMBI lock should not fail in this early stage. If this is the case,
1055 * it is due to an improper exit of the application.
1056 * So force the release of the faulty lock.
1058 if (igc_get_hw_semaphore_generic(hw) < 0)
1059 PMD_DRV_LOG(DEBUG, "SMBI lock released");
1061 igc_put_hw_semaphore_generic(hw);
1063 if (hw->mac.ops.acquire_swfw_sync != NULL) {
1067 * Phy lock should not fail in this early stage.
1068 * If this is the case, it is due to an improper exit of the
1069 * application. So force the release of the faulty lock.
1071 mask = IGC_SWFW_PHY0_SM;
1072 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
1073 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
1076 hw->mac.ops.release_swfw_sync(hw, mask);
1079 * This one is more tricky since it is common to all ports; but
1080 * swfw_sync retries last long enough (1s) to be almost sure
1081 * that if lock can not be taken it is due to an improper lock
1084 mask = IGC_SWFW_EEP_SM;
1085 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
1086 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
1088 hw->mac.ops.release_swfw_sync(hw, mask);
1095 * free all rx/tx queues.
1098 igc_dev_free_queues(struct rte_eth_dev *dev)
1102 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1103 eth_igc_rx_queue_release(dev->data->rx_queues[i]);
1104 dev->data->rx_queues[i] = NULL;
1106 dev->data->nb_rx_queues = 0;
1108 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1109 eth_igc_tx_queue_release(dev->data->tx_queues[i]);
1110 dev->data->tx_queues[i] = NULL;
1112 dev->data->nb_tx_queues = 0;
1116 eth_igc_close(struct rte_eth_dev *dev)
1118 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1119 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
1120 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1121 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
1124 PMD_INIT_FUNC_TRACE();
1126 if (!adapter->stopped)
1129 igc_intr_other_disable(dev);
1131 int ret = rte_intr_callback_unregister(intr_handle,
1132 eth_igc_interrupt_handler, dev);
1133 if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
1136 PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
1137 DELAY(200 * 1000); /* delay 200ms */
1138 } while (retry++ < 5);
1140 igc_phy_hw_reset(hw);
1141 igc_hw_control_release(hw);
1142 igc_dev_free_queues(dev);
1144 /* Reset any pending lock */
1145 igc_reset_swfw_lock(hw);
1149 igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
1151 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1153 hw->vendor_id = pci_dev->id.vendor_id;
1154 hw->device_id = pci_dev->id.device_id;
1155 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1156 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1160 eth_igc_dev_init(struct rte_eth_dev *dev)
1162 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1163 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
1164 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1167 PMD_INIT_FUNC_TRACE();
1168 dev->dev_ops = ð_igc_ops;
1171 * for secondary processes, we don't initialize any further as primary
1172 * has already done this work. Only check we don't need a different
1175 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1178 rte_eth_copy_pci_info(dev, pci_dev);
1181 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1183 igc_identify_hardware(dev, pci_dev);
1184 if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
1189 igc_get_bus_info(hw);
1191 /* Reset any pending lock */
1192 if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
1197 /* Finish initialization */
1198 if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
1203 hw->mac.autoneg = 1;
1204 hw->phy.autoneg_wait_to_complete = 0;
1205 hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
1207 /* Copper options */
1208 if (hw->phy.media_type == igc_media_type_copper) {
1209 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
1210 hw->phy.disable_polarity_correction = 0;
1211 hw->phy.ms_type = igc_ms_hw_default;
1215 * Start from a known state, this is important in reading the nvm
1216 * and mac from that.
1220 /* Make sure we have a good EEPROM before we read from it */
1221 if (igc_validate_nvm_checksum(hw) < 0) {
1223 * Some PCI-E parts fail the first check due to
1224 * the link being in sleep state, call it again,
1225 * if it fails a second time it's a real issue.
1227 if (igc_validate_nvm_checksum(hw) < 0) {
1228 PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
1234 /* Read the permanent MAC address out of the EEPROM */
1235 if (igc_read_mac_addr(hw) != 0) {
1236 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
1241 /* Allocate memory for storing MAC addresses */
1242 dev->data->mac_addrs = rte_zmalloc("igc",
1243 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
1244 if (dev->data->mac_addrs == NULL) {
1245 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
1246 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
1251 /* Copy the permanent MAC address */
1252 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1253 &dev->data->mac_addrs[0]);
1255 /* Now initialize the hardware */
1256 if (igc_hardware_init(hw) != 0) {
1257 PMD_INIT_LOG(ERR, "Hardware initialization failed");
1258 rte_free(dev->data->mac_addrs);
1259 dev->data->mac_addrs = NULL;
1264 /* Pass the information to the rte_eth_dev_close() that it should also
1265 * release the private port resources.
1267 dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1269 hw->mac.get_link_status = 1;
1272 /* Indicate SOL/IDER usage */
1273 if (igc_check_reset_block(hw) < 0)
1275 "PHY reset is blocked due to SOL/IDER session.");
1277 PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
1278 dev->data->port_id, pci_dev->id.vendor_id,
1279 pci_dev->id.device_id);
1281 rte_intr_callback_register(&pci_dev->intr_handle,
1282 eth_igc_interrupt_handler, (void *)dev);
1284 /* enable uio/vfio intr/eventfd mapping */
1285 rte_intr_enable(&pci_dev->intr_handle);
1287 /* enable support intr */
1288 igc_intr_other_enable(dev);
1290 /* initialize the queue stats mapping */
1291 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1292 igc->txq_stats_map[i] = -1;
1293 igc->rxq_stats_map[i] = -1;
1299 igc_hw_control_release(hw);
1304 eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
1306 PMD_INIT_FUNC_TRACE();
1308 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1311 eth_igc_close(eth_dev);
1316 eth_igc_reset(struct rte_eth_dev *dev)
1320 PMD_INIT_FUNC_TRACE();
1322 ret = eth_igc_dev_uninit(dev);
1326 return eth_igc_dev_init(dev);
1330 eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
1332 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1335 rctl = IGC_READ_REG(hw, IGC_RCTL);
1336 rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
1337 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1342 eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
1344 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1347 rctl = IGC_READ_REG(hw, IGC_RCTL);
1348 rctl &= (~IGC_RCTL_UPE);
1349 if (dev->data->all_multicast == 1)
1350 rctl |= IGC_RCTL_MPE;
1352 rctl &= (~IGC_RCTL_MPE);
1353 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1358 eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
1360 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1363 rctl = IGC_READ_REG(hw, IGC_RCTL);
1364 rctl |= IGC_RCTL_MPE;
1365 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1370 eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
1372 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1375 if (dev->data->promiscuous == 1)
1376 return 0; /* must remain in all_multicast mode */
1378 rctl = IGC_READ_REG(hw, IGC_RCTL);
1379 rctl &= (~IGC_RCTL_MPE);
1380 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1385 eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
1388 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1389 struct igc_fw_version fw;
1392 igc_get_fw_version(hw, &fw);
1394 /* if option rom is valid, display its version too */
1396 ret = snprintf(fw_version, fw_size,
1397 "%d.%d, 0x%08x, %d.%d.%d",
1398 fw.eep_major, fw.eep_minor, fw.etrack_id,
1399 fw.or_major, fw.or_build, fw.or_patch);
1402 if (fw.etrack_id != 0X0000) {
1403 ret = snprintf(fw_version, fw_size,
1405 fw.eep_major, fw.eep_minor,
1408 ret = snprintf(fw_version, fw_size,
1410 fw.eep_major, fw.eep_minor,
1415 ret += 1; /* add the size of '\0' */
1416 if (fw_size < (u32)ret)
1423 eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1425 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1427 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1428 dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
1429 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1430 dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
1431 dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
1433 dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
1434 dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
1435 dev_info->max_vmdq_pools = 0;
1437 dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
1438 dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
1439 dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
1441 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1443 .pthresh = IGC_DEFAULT_RX_PTHRESH,
1444 .hthresh = IGC_DEFAULT_RX_HTHRESH,
1445 .wthresh = IGC_DEFAULT_RX_WTHRESH,
1447 .rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
1452 dev_info->default_txconf = (struct rte_eth_txconf) {
1454 .pthresh = IGC_DEFAULT_TX_PTHRESH,
1455 .hthresh = IGC_DEFAULT_TX_HTHRESH,
1456 .wthresh = IGC_DEFAULT_TX_WTHRESH,
1461 dev_info->rx_desc_lim = rx_desc_lim;
1462 dev_info->tx_desc_lim = tx_desc_lim;
1464 dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
1465 ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
1466 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;
1468 dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
1469 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1474 eth_igc_led_on(struct rte_eth_dev *dev)
1476 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1478 return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
1482 eth_igc_led_off(struct rte_eth_dev *dev)
1484 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1486 return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
1489 static const uint32_t *
1490 eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
1492 static const uint32_t ptypes[] = {
1493 /* refers to rx_desc_pkt_info_to_pkt_type() */
1496 RTE_PTYPE_L3_IPV4_EXT,
1498 RTE_PTYPE_L3_IPV6_EXT,
1502 RTE_PTYPE_TUNNEL_IP,
1503 RTE_PTYPE_INNER_L3_IPV6,
1504 RTE_PTYPE_INNER_L3_IPV6_EXT,
1505 RTE_PTYPE_INNER_L4_TCP,
1506 RTE_PTYPE_INNER_L4_UDP,
1514 eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1516 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1517 uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
1520 /* if extended VLAN has been enabled */
1521 if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
1522 frame_size += VLAN_TAG_SIZE;
1524 /* check that mtu is within the allowed range */
1525 if (mtu < RTE_ETHER_MIN_MTU ||
1526 frame_size > MAX_RX_JUMBO_FRAME_SIZE)
1530 * refuse mtu that requires the support of scattered packets when
1531 * this feature has not been enabled before.
1533 if (!dev->data->scattered_rx &&
1534 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
1537 rctl = IGC_READ_REG(hw, IGC_RCTL);
1539 /* switch to jumbo mode if needed */
1540 if (mtu > RTE_ETHER_MTU) {
1541 dev->data->dev_conf.rxmode.offloads |=
1542 DEV_RX_OFFLOAD_JUMBO_FRAME;
1543 rctl |= IGC_RCTL_LPE;
1545 dev->data->dev_conf.rxmode.offloads &=
1546 ~DEV_RX_OFFLOAD_JUMBO_FRAME;
1547 rctl &= ~IGC_RCTL_LPE;
1549 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1551 /* update max frame size */
1552 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1554 IGC_WRITE_REG(hw, IGC_RLPML,
1555 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1561 eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1562 uint32_t index, uint32_t pool)
1564 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1566 igc_rar_set(hw, mac_addr->addr_bytes, index);
1572 eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
1574 uint8_t addr[RTE_ETHER_ADDR_LEN];
1575 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1577 memset(addr, 0, sizeof(addr));
1578 igc_rar_set(hw, addr, index);
1582 eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
1583 struct rte_ether_addr *addr)
1585 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1586 igc_rar_set(hw, addr->addr_bytes, 0);
1591 eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
1592 struct rte_ether_addr *mc_addr_set,
1593 uint32_t nb_mc_addr)
1595 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1596 igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
1601 * Read hardware registers
1604 igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)
1608 uint64_t old_gprc = stats->gprc;
1609 uint64_t old_gptc = stats->gptc;
1610 uint64_t old_tpr = stats->tpr;
1611 uint64_t old_tpt = stats->tpt;
1612 uint64_t old_rpthc = stats->rpthc;
1613 uint64_t old_hgptc = stats->hgptc;
1615 stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);
1616 stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
1617 stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
1618 stats->mpc += IGC_READ_REG(hw, IGC_MPC);
1619 stats->scc += IGC_READ_REG(hw, IGC_SCC);
1620 stats->ecol += IGC_READ_REG(hw, IGC_ECOL);
1622 stats->mcc += IGC_READ_REG(hw, IGC_MCC);
1623 stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
1624 stats->colc += IGC_READ_REG(hw, IGC_COLC);
1626 stats->dc += IGC_READ_REG(hw, IGC_DC);
1627 stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
1628 stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
1629 stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
1630 stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
1631 stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);
1634 * For watchdog management we need to know if we have been
1635 * paused during the last interval, so capture that here.
1637 pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);
1638 stats->xoffrxc += pause_frames;
1639 stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);
1640 stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);
1641 stats->prc64 += IGC_READ_REG(hw, IGC_PRC64);
1642 stats->prc127 += IGC_READ_REG(hw, IGC_PRC127);
1643 stats->prc255 += IGC_READ_REG(hw, IGC_PRC255);
1644 stats->prc511 += IGC_READ_REG(hw, IGC_PRC511);
1645 stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);
1646 stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);
1647 stats->gprc += IGC_READ_REG(hw, IGC_GPRC);
1648 stats->bprc += IGC_READ_REG(hw, IGC_BPRC);
1649 stats->mprc += IGC_READ_REG(hw, IGC_MPRC);
1650 stats->gptc += IGC_READ_REG(hw, IGC_GPTC);
1652 /* For the 64-bit byte counters the low dword must be read first. */
1653 /* Both registers clear on the read of the high dword */
1655 /* Workaround: CRC bytes are included in size, so take away 4 bytes/packet */
1656 stats->gorc += IGC_READ_REG(hw, IGC_GORCL);
1657 stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);
1658 stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
1659 stats->gotc += IGC_READ_REG(hw, IGC_GOTCL);
1660 stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);
1661 stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;
1663 stats->rnbc += IGC_READ_REG(hw, IGC_RNBC);
1664 stats->ruc += IGC_READ_REG(hw, IGC_RUC);
1665 stats->rfc += IGC_READ_REG(hw, IGC_RFC);
1666 stats->roc += IGC_READ_REG(hw, IGC_ROC);
1667 stats->rjc += IGC_READ_REG(hw, IGC_RJC);
1669 stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);
1670 stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);
1671 stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);
1672 stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);
1673 stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);
1674 stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);
1675 stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);
1677 stats->tpr += IGC_READ_REG(hw, IGC_TPR);
1678 stats->tpt += IGC_READ_REG(hw, IGC_TPT);
1680 stats->tor += IGC_READ_REG(hw, IGC_TORL);
1681 stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);
1682 stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
1683 stats->tot += IGC_READ_REG(hw, IGC_TOTL);
1684 stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);
1685 stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;
1687 stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);
1688 stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);
1689 stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);
1690 stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);
1691 stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);
1692 stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);
1693 stats->mptc += IGC_READ_REG(hw, IGC_MPTC);
1694 stats->bptc += IGC_READ_REG(hw, IGC_BPTC);
1695 stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);
1697 stats->iac += IGC_READ_REG(hw, IGC_IAC);
1698 stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);
1699 stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);
1700 stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);
1702 /* Host to Card Statistics */
1703 stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);
1704 stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);
1705 stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
1706 stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);
1707 stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);
1708 stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
1709 stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);
1713 * Write 0 to all queue status registers
1716 igc_reset_queue_stats_register(struct igc_hw *hw)
1720 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1721 IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);
1722 IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);
1723 IGC_WRITE_REG(hw, IGC_PQGORC(i), 0);
1724 IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);
1725 IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);
1726 IGC_WRITE_REG(hw, IGC_RQDPC(i), 0);
1727 IGC_WRITE_REG(hw, IGC_TQDPC(i), 0);
1732 * Read all hardware queue status registers
1735 igc_read_queue_stats_register(struct rte_eth_dev *dev)
1737 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1738 struct igc_hw_queue_stats *queue_stats =
1739 IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1743 * This register is not cleared on read. Furthermore, the register wraps
1744 * around back to 0x00000000 on the next increment when reaching a value
1745 * of 0xFFFFFFFF and then continues normal count operation.
1747 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1755 * Read the register first; if the new value is smaller than the
1756 * previous read, the register has wrapped around, so increment
1757 * the high 4 bytes by 1 and replace the low 4
1758 * bytes with the new value.
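 * For example, if the stored low dword is 0xFFFFFFF0 and the register now
 * reads 0x00000010, the counter has wrapped once, so the high dword is
 * incremented before the low dword is replaced.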
1760 tmp = IGC_READ_REG(hw, IGC_PQGPRC(i));
1761 value.ddword = queue_stats->pqgprc[i];
1762 if (value.dword[U32_0_IN_U64] > tmp)
1763 value.dword[U32_1_IN_U64]++;
1764 value.dword[U32_0_IN_U64] = tmp;
1765 queue_stats->pqgprc[i] = value.ddword;
1767 tmp = IGC_READ_REG(hw, IGC_PQGPTC(i));
1768 value.ddword = queue_stats->pqgptc[i];
1769 if (value.dword[U32_0_IN_U64] > tmp)
1770 value.dword[U32_1_IN_U64]++;
1771 value.dword[U32_0_IN_U64] = tmp;
1772 queue_stats->pqgptc[i] = value.ddword;
1774 tmp = IGC_READ_REG(hw, IGC_PQGORC(i));
1775 value.ddword = queue_stats->pqgorc[i];
1776 if (value.dword[U32_0_IN_U64] > tmp)
1777 value.dword[U32_1_IN_U64]++;
1778 value.dword[U32_0_IN_U64] = tmp;
1779 queue_stats->pqgorc[i] = value.ddword;
1781 tmp = IGC_READ_REG(hw, IGC_PQGOTC(i));
1782 value.ddword = queue_stats->pqgotc[i];
1783 if (value.dword[U32_0_IN_U64] > tmp)
1784 value.dword[U32_1_IN_U64]++;
1785 value.dword[U32_0_IN_U64] = tmp;
1786 queue_stats->pqgotc[i] = value.ddword;
1788 tmp = IGC_READ_REG(hw, IGC_PQMPRC(i));
1789 value.ddword = queue_stats->pqmprc[i];
1790 if (value.dword[U32_0_IN_U64] > tmp)
1791 value.dword[U32_1_IN_U64]++;
1792 value.dword[U32_0_IN_U64] = tmp;
1793 queue_stats->pqmprc[i] = value.ddword;
1795 tmp = IGC_READ_REG(hw, IGC_RQDPC(i));
1796 value.ddword = queue_stats->rqdpc[i];
1797 if (value.dword[U32_0_IN_U64] > tmp)
1798 value.dword[U32_1_IN_U64]++;
1799 value.dword[U32_0_IN_U64] = tmp;
1800 queue_stats->rqdpc[i] = value.ddword;
1802 tmp = IGC_READ_REG(hw, IGC_TQDPC(i));
1803 value.ddword = queue_stats->tqdpc[i];
1804 if (value.dword[U32_0_IN_U64] > tmp)
1805 value.dword[U32_1_IN_U64]++;
1806 value.dword[U32_0_IN_U64] = tmp;
1807 queue_stats->tqdpc[i] = value.ddword;
1812 eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1814 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
1815 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1816 struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
1817 struct igc_hw_queue_stats *queue_stats =
1818 IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1822 * Cancel status handler since it will read the queue status registers
1824 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
1826 /* Read status register */
1827 igc_read_queue_stats_register(dev);
1828 igc_read_stats_registers(hw, stats);
1830 if (rte_stats == NULL) {
1831 /* Restart queue status handler */
1832 rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1833 igc_update_queue_stats_handler, dev);
1838 rte_stats->imissed = stats->mpc;
1839 rte_stats->ierrors = stats->crcerrs +
1840 stats->rlec + stats->ruc + stats->roc +
1841 stats->rxerrc + stats->algnerrc;
1844 rte_stats->oerrors = stats->ecol + stats->latecol;
1846 rte_stats->ipackets = stats->gprc;
1847 rte_stats->opackets = stats->gptc;
1848 rte_stats->ibytes = stats->gorc;
1849 rte_stats->obytes = stats->gotc;
1851 /* Get per-queue statuses */
1852 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1853 /* GET TX queue statuses */
1854 int map_id = igc->txq_stats_map[i];
1856 rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
1857 rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
1859 /* Get RX queue statuses */
1860 map_id = igc->rxq_stats_map[i];
1862 rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
1863 rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
1864 rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
1868 /* Restart queue status handler */
1869 rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1870 igc_update_queue_stats_handler, dev);
1875 eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1878 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1879 struct igc_hw_stats *hw_stats =
1880 IGC_DEV_PRIVATE_STATS(dev);
1883 igc_read_stats_registers(hw, hw_stats);
1885 if (n < IGC_NB_XSTATS)
1886 return IGC_NB_XSTATS;
1888 /* If this is a reset xstats is NULL, and we have cleared the
1889 * registers by reading them.
1894 /* Extended stats */
1895 for (i = 0; i < IGC_NB_XSTATS; i++) {
1897 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1898 rte_igc_stats_strings[i].offset);
1901 return IGC_NB_XSTATS;
1905 eth_igc_xstats_reset(struct rte_eth_dev *dev)
1907 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1908 struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
1909 struct igc_hw_queue_stats *queue_stats =
1910 IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1912 /* Cancel the queue stats handler to avoid conflicts */
1913 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
1915 /* HW registers are cleared on read */
1916 igc_reset_queue_stats_register(hw);
1917 igc_read_stats_registers(hw, hw_stats);
1919 /* Reset software totals */
1920 memset(hw_stats, 0, sizeof(*hw_stats));
1921 memset(queue_stats, 0, sizeof(*queue_stats));
1923 /* Restart the queue status handler */
1924 rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
1931 eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1932 struct rte_eth_xstat_name *xstats_names, unsigned int size)
1936 if (xstats_names == NULL)
1937 return IGC_NB_XSTATS;
1939 if (size < IGC_NB_XSTATS) {
1940 PMD_DRV_LOG(ERR, "not enough buffers!");
1941 return IGC_NB_XSTATS;
1944 for (i = 0; i < IGC_NB_XSTATS; i++)
1945 strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
1946 sizeof(xstats_names[i].name));
1948 return IGC_NB_XSTATS;
1952 eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
1953 struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
1959 return eth_igc_xstats_get_names(dev, xstats_names, limit);
1961 for (i = 0; i < limit; i++) {
1962 if (ids[i] >= IGC_NB_XSTATS) {
1963 PMD_DRV_LOG(ERR, "id value isn't valid");
1966 strlcpy(xstats_names[i].name,
1967 rte_igc_stats_strings[ids[i]].name,
1968 sizeof(xstats_names[i].name));
1974 eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
1975 uint64_t *values, unsigned int n)
1977 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1978 struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
1981 igc_read_stats_registers(hw, hw_stats);
1984 if (n < IGC_NB_XSTATS)
1985 return IGC_NB_XSTATS;
1987 /* If this is a reset xstats is NULL, and we have cleared the
1988 * registers by reading them.
1993 /* Extended stats */
1994 for (i = 0; i < IGC_NB_XSTATS; i++)
1995 values[i] = *(uint64_t *)(((char *)hw_stats) +
1996 rte_igc_stats_strings[i].offset);
1998 return IGC_NB_XSTATS;
2001 for (i = 0; i < n; i++) {
2002 if (ids[i] >= IGC_NB_XSTATS) {
2003 PMD_DRV_LOG(ERR, "id value isn't valid");
2006 values[i] = *(uint64_t *)(((char *)hw_stats) +
2007 rte_igc_stats_strings[ids[i]].offset);
2014 eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
2015 uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
2017 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
2019 /* check queue id is valid */
2020 if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
2021 PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
2022 queue_id, IGC_QUEUE_PAIRS_NUM - 1);
2026 /* store the stats mapping index */
2028 igc->rxq_stats_map[queue_id] = stat_idx;
2030 igc->txq_stats_map[queue_id] = stat_idx;
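/*
 * Example: after calling this with queue_id = 2, stat_idx = 0 and is_rx = 1,
 * the counters of Rx queue 2 are accumulated into rte_stats->q_ipackets[0]
 * and rte_stats->q_ibytes[0] by eth_igc_stats_get() above.
 */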
2036 eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2038 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2039 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2040 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2041 uint32_t vec = IGC_MISC_VEC_ID;
2043 if (rte_intr_allow_others(intr_handle))
2044 vec = IGC_RX_VEC_START;
2046 uint32_t mask = 1u << (queue_id + vec);
2048 IGC_WRITE_REG(hw, IGC_EIMC, mask);
2049 IGC_WRITE_FLUSH(hw);
2055 eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
2057 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2058 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2059 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2060 uint32_t vec = IGC_MISC_VEC_ID;
2062 if (rte_intr_allow_others(intr_handle))
2063 vec = IGC_RX_VEC_START;
2065 uint32_t mask = 1u << (queue_id + vec);
2067 IGC_WRITE_REG(hw, IGC_EIMS, mask);
2068 IGC_WRITE_FLUSH(hw);
2070 rte_intr_enable(intr_handle);
2076 eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2078 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2083 fc_conf->pause_time = hw->fc.pause_time;
2084 fc_conf->high_water = hw->fc.high_water;
2085 fc_conf->low_water = hw->fc.low_water;
2086 fc_conf->send_xon = hw->fc.send_xon;
2087 fc_conf->autoneg = hw->mac.autoneg;
2090 * Return rx_pause and tx_pause status according to actual setting of
2091 * the TFCE and RFCE bits in the CTRL register.
2093 ctrl = IGC_READ_REG(hw, IGC_CTRL);
2094 if (ctrl & IGC_CTRL_TFCE)
2099 if (ctrl & IGC_CTRL_RFCE)
2104 if (rx_pause && tx_pause)
2105 fc_conf->mode = RTE_FC_FULL;
2107 fc_conf->mode = RTE_FC_RX_PAUSE;
2109 fc_conf->mode = RTE_FC_TX_PAUSE;
2111 fc_conf->mode = RTE_FC_NONE;
2117 eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2119 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2120 uint32_t rx_buf_size;
2121 uint32_t max_high_water;
2125 if (fc_conf->autoneg != hw->mac.autoneg)
2128 rx_buf_size = igc_get_rx_buffer_size(hw);
2129 PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
2131 /* At least reserve one Ethernet frame for watermark */
2132 max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
2133 if (fc_conf->high_water > max_high_water ||
2134 fc_conf->high_water < fc_conf->low_water) {
2136 "Incorrect high(%u)/low(%u) water value, max is %u",
2137 fc_conf->high_water, fc_conf->low_water,
2142 switch (fc_conf->mode) {
2144 hw->fc.requested_mode = igc_fc_none;
2146 case RTE_FC_RX_PAUSE:
2147 hw->fc.requested_mode = igc_fc_rx_pause;
2149 case RTE_FC_TX_PAUSE:
2150 hw->fc.requested_mode = igc_fc_tx_pause;
2153 hw->fc.requested_mode = igc_fc_full;
2156 PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
2160 hw->fc.pause_time = fc_conf->pause_time;
2161 hw->fc.high_water = fc_conf->high_water;
2162 hw->fc.low_water = fc_conf->low_water;
2163 hw->fc.send_xon = fc_conf->send_xon;
2165 err = igc_setup_link_generic(hw);
2166 if (err == IGC_SUCCESS) {
2168 * check if we want to forward MAC frames - driver doesn't have
2169 * native capability to do that, so we'll write the registers
2172 rctl = IGC_READ_REG(hw, IGC_RCTL);
2174 /* set or clear RCTL.PMCF bit depending on configuration */
2175 if (fc_conf->mac_ctrl_frame_fwd != 0)
2176 rctl |= IGC_RCTL_PMCF;
2178 rctl &= ~IGC_RCTL_PMCF;
2180 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
2181 IGC_WRITE_FLUSH(hw);
2186 PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
2191 eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2192 struct rte_pci_device *pci_dev)
2194 PMD_INIT_FUNC_TRACE();
2195 return rte_eth_dev_pci_generic_probe(pci_dev,
2196 sizeof(struct igc_adapter), eth_igc_dev_init);
2200 eth_igc_pci_remove(struct rte_pci_device *pci_dev)
2202 PMD_INIT_FUNC_TRACE();
2203 return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
2206 static struct rte_pci_driver rte_igc_pmd = {
2207 .id_table = pci_id_igc_map,
2208 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
2209 .probe = eth_igc_pci_probe,
2210 .remove = eth_igc_pci_remove,
2213 RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
2214 RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
2215 RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");