/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */
#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_alarm.h>

#include "igc_logs.h"
#include "igc_txrx.h"
#include "igc_filter.h"
#include "igc_flow.h"
#define IGC_INTEL_VENDOR_ID		0x8086

/*
 * The overhead from MTU to max frame size.
 * The VLAN tag needs to be counted as well.
 */
#define IGC_ETH_OVERHEAD	(RTE_ETHER_HDR_LEN + \
				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
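/*
 * With standard 802.3 values (14-byte Ethernet header, 4-byte CRC and,
 * assuming VLAN_TAG_SIZE is 4, one VLAN tag) this overhead is
 * 14 + 4 + 4 = 22 bytes, so e.g. an MTU of 1500 maps to a 1522-byte frame.
 */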
#define IGC_FC_PAUSE_TIME		0x0680
#define IGC_LINK_UPDATE_CHECK_TIMEOUT	90  /* 9s */
#define IGC_LINK_UPDATE_CHECK_INTERVAL	100 /* ms */

#define IGC_MISC_VEC_ID			RTE_INTR_VEC_ZERO_OFFSET
#define IGC_RX_VEC_START		RTE_INTR_VEC_RXTX_OFFSET
#define IGC_MSIX_OTHER_INTR_VEC		0   /* MSI-X other interrupt vector */
#define IGC_FLAG_NEED_LINK_UPDATE	(1u << 0)	/* need update link */

#define IGC_DEFAULT_RX_FREE_THRESH	32

#define IGC_DEFAULT_RX_PTHRESH		8
#define IGC_DEFAULT_RX_HTHRESH		8
#define IGC_DEFAULT_RX_WTHRESH		4

#define IGC_DEFAULT_TX_PTHRESH		8
#define IGC_DEFAULT_TX_HTHRESH		1
#define IGC_DEFAULT_TX_WTHRESH		16
/* External VLAN Enable bit mask */
#define IGC_CTRL_EXT_EXT_VLAN		(1u << 26)

/* CTRL register speed-select field (used to force 2.5G in MAC loopback) */
#define IGC_CTRL_SPEED_MASK		(7u << 8)
#define IGC_CTRL_SPEED_2500		(6u << 8)

/* External VLAN Ether Type bit mask and shift */
#define IGC_VET_EXT			0xFFFF0000
#define IGC_VET_EXT_SHIFT		16
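/*
 * For illustration (editorial note): assuming the usual IGC_VET layout
 * (inner TPID in the low 16 bits, outer TPID in the high 16 bits), the
 * outer VLAN Ether Type can be read back as
 * (IGC_READ_REG(hw, IGC_VET) & IGC_VET_EXT) >> IGC_VET_EXT_SHIFT.
 */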
/* Force EEE Auto-negotiation */
#define IGC_EEER_EEE_FRC_AN		(1u << 28)

/* Per Queue Good Packets Received Count */
#define IGC_PQGPRC(idx)		(0x10010 + 0x100 * (idx))
/* Per Queue Good Octets Received Count */
#define IGC_PQGORC(idx)		(0x10018 + 0x100 * (idx))
/* Per Queue Good Octets Transmitted Count */
#define IGC_PQGOTC(idx)		(0x10034 + 0x100 * (idx))
/* Per Queue Multicast Packets Received Count */
#define IGC_PQMPRC(idx)		(0x10038 + 0x100 * (idx))
/* Transmit Queue Drop Packet Count */
#define IGC_TQDPC(idx)		(0xe030 + 0x40 * (idx))
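/*
 * Example (editorial note): the counters are laid out 0x100 apart per
 * queue, so queue 1's good-packets counter is IGC_PQGPRC(1) =
 * 0x10010 + 0x100 = 0x10110.
 */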
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define U32_0_IN_U64		0	/* lower bytes of u64 */
#define U32_1_IN_U64		1	/* higher bytes of u64 */
#else
#define U32_0_IN_U64		1
#define U32_1_IN_U64		0
#endif

#define IGC_ALARM_INTERVAL	8000000u
/* us; after about 13.6 seconds some per-queue registers wrap back to 0 */
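/*
 * Editorial note on the 13.6s figure: at 2.5Gb/s the octet counters
 * advance by 2.5e9 / 8 = 312.5MB/s, so a 32-bit counter wraps after
 * 2^32 / 312.5e6 ~= 13.7s; polling every 8s therefore observes at most
 * one wrap between two consecutive reads.
 */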
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IGC_MAX_RXD,
	.nb_min = IGC_MIN_RXD,
	.nb_align = IGC_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IGC_MAX_TXD,
	.nb_min = IGC_MIN_TXD,
	.nb_align = IGC_TXD_ALIGN,
	.nb_seg_max = IGC_TX_MAX_SEG,
	.nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,
};
static const struct rte_pci_id pci_id_igc_map[] = {
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K)  },
	{ .vendor_id = 0, /* sentinel */ },
};
/* store statistics names and their offsets in the stats structure */
struct rte_igc_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)},
	{"rx_errors", offsetof(struct igc_hw_stats, rxerrc)},
	{"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct igc_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct igc_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)},
	{"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)},
	{"rx_length_errors", offsetof(struct igc_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)},
	{"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)},
	{"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct igc_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct igc_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct igc_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct igc_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)},
	{"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)},
	{"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)},
	{"rx_descriptor_lower_threshold",
		offsetof(struct igc_hw_stats, icrxdmtc)},
};
#define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \
		sizeof(rte_igc_stats_strings[0]))
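/* i.e. the number of entries in rte_igc_stats_strings; RTE_DIM() from
 * rte_common.h would compute the same count.
 */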
static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static int eth_igc_set_link_up(struct rte_eth_dev *dev);
static int eth_igc_set_link_down(struct rte_eth_dev *dev);
static int eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
		char *fw_version, size_t fw_size);
static int eth_igc_infos_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int eth_igc_led_on(struct rte_eth_dev *dev);
static int eth_igc_led_off(struct rte_eth_dev *dev);
static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igc_rar_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);
static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int eth_igc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *rte_stats);
static int eth_igc_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned int n);
static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
		unsigned int limit);
static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type, uint16_t tpid);
static const struct eth_dev_ops eth_igc_ops = {
	.dev_configure		= eth_igc_configure,
	.link_update		= eth_igc_link_update,
	.dev_stop		= eth_igc_stop,
	.dev_start		= eth_igc_start,
	.dev_close		= eth_igc_close,
	.dev_reset		= eth_igc_reset,
	.dev_set_link_up	= eth_igc_set_link_up,
	.dev_set_link_down	= eth_igc_set_link_down,
	.promiscuous_enable	= eth_igc_promiscuous_enable,
	.promiscuous_disable	= eth_igc_promiscuous_disable,
	.allmulticast_enable	= eth_igc_allmulticast_enable,
	.allmulticast_disable	= eth_igc_allmulticast_disable,
	.fw_version_get		= eth_igc_fw_version_get,
	.dev_infos_get		= eth_igc_infos_get,
	.dev_led_on		= eth_igc_led_on,
	.dev_led_off		= eth_igc_led_off,
	.dev_supported_ptypes_get = eth_igc_supported_ptypes_get,
	.mtu_set		= eth_igc_mtu_set,
	.mac_addr_add		= eth_igc_rar_set,
	.mac_addr_remove	= eth_igc_rar_clear,
	.mac_addr_set		= eth_igc_default_mac_addr_set,
	.set_mc_addr_list	= eth_igc_set_mc_addr_list,

	.rx_queue_setup		= eth_igc_rx_queue_setup,
	.rx_queue_release	= eth_igc_rx_queue_release,
	.tx_queue_setup		= eth_igc_tx_queue_setup,
	.tx_queue_release	= eth_igc_tx_queue_release,
	.tx_done_cleanup	= eth_igc_tx_done_cleanup,
	.rxq_info_get		= eth_igc_rxq_info_get,
	.txq_info_get		= eth_igc_txq_info_get,
	.stats_get		= eth_igc_stats_get,
	.xstats_get		= eth_igc_xstats_get,
	.xstats_get_by_id	= eth_igc_xstats_get_by_id,
	.xstats_get_names_by_id	= eth_igc_xstats_get_names_by_id,
	.xstats_get_names	= eth_igc_xstats_get_names,
	.stats_reset		= eth_igc_xstats_reset,
	.xstats_reset		= eth_igc_xstats_reset,
	.queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,
	.rx_queue_intr_enable	= eth_igc_rx_queue_intr_enable,
	.rx_queue_intr_disable	= eth_igc_rx_queue_intr_disable,
	.flow_ctrl_get		= eth_igc_flow_ctrl_get,
	.flow_ctrl_set		= eth_igc_flow_ctrl_set,
	.reta_update		= eth_igc_rss_reta_update,
	.reta_query		= eth_igc_rss_reta_query,
	.rss_hash_update	= eth_igc_rss_hash_update,
	.rss_hash_conf_get	= eth_igc_rss_hash_conf_get,
	.vlan_filter_set	= eth_igc_vlan_filter_set,
	.vlan_offload_set	= eth_igc_vlan_offload_set,
	.vlan_tpid_set		= eth_igc_vlan_tpid_set,
	.vlan_strip_queue_set	= eth_igc_vlan_strip_queue_set,
	.filter_ctrl		= eth_igc_filter_ctrl,
};
/*
 * multiple queue mode checking
 */
static int
igc_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		PMD_INIT_LOG(ERR, "SRIOV is not supported.");
		return -EINVAL;
	}

	if (rx_mq_mode != ETH_MQ_RX_NONE &&
		rx_mq_mode != ETH_MQ_RX_RSS) {
		/* RSS together with VMDq not supported */
		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				rx_mq_mode);
		return -EINVAL;
	}

	/* To not break software that sets an invalid mode, only display
	 * a warning if an invalid mode is used.
	 */
	if (tx_mq_mode != ETH_MQ_TX_NONE)
		PMD_INIT_LOG(WARNING,
			"TX mode %d is not supported; it is meaningless in this driver and will be ignored",
			tx_mq_mode);

	return 0;
}
static int
eth_igc_configure(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = igc_check_mq_mode(dev);
	if (ret != 0)
		return ret;

	intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
	return 0;
}
static int
eth_igc_set_link_up(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_up_phy(hw);
	else
		igc_power_up_fiber_serdes_link(hw);
	return 0;
}

static int
eth_igc_set_link_down(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_down_phy(hw);
	else
		igc_shutdown_fiber_serdes_link(hw);
	return 0;
}
/*
 * disable other interrupts
 */
static void
igc_intr_other_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMC, ~0);
}
/*
 * enable other interrupts
 */
static void
igc_intr_other_enable(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
}
/*
 * It reads ICR to get the interrupt causes, checks them and sets a bit flag
 * to update the link status.
 */
static void
eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	/* read-on-clear nic registers here */
	icr = IGC_READ_REG(hw, IGC_ICR);

	intr->flags = 0;
	if (icr & IGC_ICR_LSC)
		intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
}
/* return 0 means link status changed, -1 means not changed */
static int
eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_eth_link link;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case igc_media_type_copper:
			/* Do the work to read phy */
			igc_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case igc_media_type_fiber:
			igc_check_for_link(hw);
			link_check = (IGC_READ_REG(hw, IGC_STATUS) &
					IGC_STATUS_LU);
			break;

		case igc_media_type_internal_serdes:
			igc_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_check) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

		if (speed == SPEED_2500) {
			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
			if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
				tipg &= ~IGC_TIPG_IPGT_MASK;
				tipg |= 0x0b;
				IGC_WRITE_REG(hw, IGC_TIPG, tipg);
			}
		}
	} else {
		link.link_speed = 0;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
/*
 * It executes link_update after knowing an interrupt is present.
 */
static void
eth_igc_interrupt_action(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	int ret;

	if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		ret = eth_igc_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return;

		rte_eth_linkstatus_get(dev, &link);
		if (link.link_status)
			PMD_DRV_LOG(INFO,
				" Port %d: Link Up - speed %u Mbps - %s",
				dev->data->port_id,
				(unsigned int)link.link_speed,
				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
		else
			PMD_DRV_LOG(INFO, " Port %d: Link Down",
				dev->data->port_id);

		PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
			pci_dev->addr.domain,
			pci_dev->addr.bus,
			pci_dev->addr.devid,
			pci_dev->addr.function);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
}
/*
 * Interrupt handler which shall be registered at first.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
eth_igc_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igc_interrupt_get_status(dev);
	eth_igc_interrupt_action(dev);
}
static void igc_read_queue_stats_register(struct rte_eth_dev *dev);

/*
 * Update the queue status every IGC_ALARM_INTERVAL time.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
igc_update_queue_stats_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	igc_read_queue_stats_register(dev);
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
}
/*
 * rx,tx enable/disable
 */
static void
eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t tctl, rctl;

	tctl = IGC_READ_REG(hw, IGC_TCTL);
	rctl = IGC_READ_REG(hw, IGC_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= IGC_TCTL_EN;
		rctl |= IGC_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~IGC_TCTL_EN;
		rctl &= ~IGC_RCTL_EN;
	}
	IGC_WRITE_REG(hw, IGC_TCTL, tctl);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);
}
/*
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
eth_igc_stop(struct rte_eth_dev *dev)
{
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct rte_eth_link link;

	adapter->stopped = 1;

	/* disable receive and transmit */
	eth_igc_rxtx_control(dev, false);

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	igc_intr_other_disable(dev);

	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* disable checking EEE operation in MAC loopback mode */
	igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);

	/* Set bit for Go Link disconnect */
	igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
			IGC_82580_PM_GO_LINKD);

	/* Power down the phy. Needed to make the link go Down */
	eth_igc_set_link_down(dev);

	igc_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
/*
 * write interrupt vector allocation register
 * @hw
 *  board private structure
 * @queue_index
 *  queue index, valid 0,1,2,3
 * @tx
 *  tx:1, rx:0
 * @msix_vector
 *  msix-vector, valid 0,1,2,3,4
 */
static void
igc_write_ivar(struct igc_hw *hw, uint8_t queue_index,
		bool tx, uint8_t msix_vector)
{
	uint8_t offset = 0;
	uint8_t reg_index = queue_index >> 1;
	uint32_t val;

	/*
	 * IVAR(0)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX1		RX1		TX0		RX0
	 *
	 * IVAR(1)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX3		RX3		TX2		RX2
	 */
	if (tx)
		offset = 8 + (queue_index & 1) * 16;
	else
		offset = (queue_index & 1) * 16;

	val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset;

	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val);
}
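/*
 * Usage example (editorial note): igc_write_ivar(hw, 2, false, 1) picks
 * reg_index = 2 >> 1 = 1 and offset 0, so bits 7..0 of IVAR(1) become
 * 0x81 (vector 1 | IGC_IVAR_VALID, assuming IGC_IVAR_VALID is 0x80),
 * mapping Rx queue 2 to MSI-X vector 1.
 */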
/* Sets up the hardware to generate MSI-X interrupts properly
 * @hw
 *  board private structure
 */
static void
igc_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	uint32_t intr_mask;
	uint32_t vec = IGC_MISC_VEC_ID;
	uint32_t base = IGC_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	int i;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = IGC_RX_VEC_START;
		vec = base;
		misc_shift = 1;
	}

	/* turn on MSI-X capability first */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
			IGC_GPIE_PBA | IGC_GPIE_EIAME |
			IGC_GPIE_NSICR);
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
			misc_shift;

	if (dev->data->dev_conf.intr_conf.lsc)
		intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC);

	/* enable msix auto-clear */
	igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);

	/* set other cause interrupt vector */
	igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
		(uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);

	/* enable auto-mask */
	igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		igc_write_ivar(hw, i, 0, vec);
		intr_handle->intr_vec[i] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	IGC_WRITE_FLUSH(hw);
}
/*
 * It enables the interrupt mask and then enables the interrupt.
 *
 * @dev
 *  Pointer to struct rte_eth_dev.
 * @on
 *  Enable or Disable
 */
static void
igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	if (on)
		intr->mask |= IGC_ICR_LSC;
	else
		intr->mask &= ~IGC_ICR_LSC;
}
/*
 * It enables the interrupt.
 * It is called only once, during NIC initialization.
 */
static void
igc_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t mask;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift;
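	/*
	 * e.g. with 4 Rx queues and a dedicated misc vector (misc_shift = 1)
	 * this computes mask = RTE_LEN2MASK(4, uint32_t) << 1 = 0xf << 1 =
	 * 0x1e (editorial example).
	 */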
	IGC_WRITE_REG(hw, IGC_EIMS, mask);
}
/*
 * Get hardware rx-buffer size.
 */
static uint32_t
igc_get_rx_buffer_size(struct igc_hw *hw)
{
	/* the RXPBS size field is in KB; shifting left by 10 yields bytes */
	return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
}
/*
 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igc_hw_control_acquire(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igc_hw_control_release(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT,
			ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}
static int
igc_hardware_init(struct igc_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igc_hw_control_acquire(hw);

	/* Issue a global reset */
	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/*
	 * Hardware flow control
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 */
	rx_buf_size = igc_get_rx_buffer_size(hw);
	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
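	/*
	 * Worked example (editorial note): with a 32KB Rx packet buffer,
	 * high_water = 32768 - 2 * 1518 = 29732 bytes and
	 * low_water = 29732 - 1500 = 28232 bytes.
	 */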
	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
	hw->fc.requested_mode = igc_fc_full;

	diag = igc_init_hw(hw);
	if (diag < 0)
		return diag;

	igc_get_phy_info(hw);
	igc_check_for_link(hw);

	return 0;
}
static int
eth_igc_start(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t *speeds;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	/* disable uio/vfio intr/eventfd mapping */
	if (!adapter->stopped)
		rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igc_set_link_up(dev);

	/* Put the address into the Receive Address Array */
	igc_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igc_hardware_init(hw)) {
		PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
		dev->data->dev_conf.intr_conf.rxq) {
		uint32_t intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
			dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR,
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for rx interrupt */
	igc_configure_msix_intr(dev);

	igc_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = igc_rx_init(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
		igc_dev_clear_queues(dev);
		return ret;
	}

	igc_clear_hw_cntrs_base_generic(hw);

	/* VLAN Offload Settings */
	eth_igc_vlan_offload_set(dev,
		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
		hw->mac.autoneg = 1;
	} else {
		int num_speeds = 0;
		bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
				ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
				ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
				ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_2_5G) {
			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && num_speeds > 1))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	igc_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc)
			igc_lsc_interrupt_setup(dev, 1);
		else
			igc_lsc_interrupt_setup(dev, 0);
	} else {
		rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc)
			PMD_DRV_LOG(INFO,
				"LSC won't enable because of no intr multiplex");
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq &&
			rte_intr_dp_is_en(intr_handle))
		igc_rxq_interrupt_setup(dev);

	/* resume enabled intr since hw reset */
	igc_intr_other_enable(dev);

	eth_igc_rxtx_control(dev, true);
	eth_igc_link_update(dev, 0);

	/* configure MAC-loopback mode */
	if (dev->data->dev_conf.lpbk_mode == 1) {
		uint32_t reg_val;

		reg_val = IGC_READ_REG(hw, IGC_CTRL);
		reg_val &= ~IGC_CTRL_SPEED_MASK;
		reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
			IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
		IGC_WRITE_REG(hw, IGC_CTRL, reg_val);

		igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
	}

	return 0;

error_invalid_config:
	PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
		dev->data->dev_conf.link_speeds, dev->data->port_id);
	igc_dev_clear_queues(dev);
	return -EINVAL;
}
static int
igc_reset_swfw_lock(struct igc_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = igc_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (igc_get_hw_semaphore_generic(hw) < 0)
		PMD_DRV_LOG(DEBUG, "SMBI lock released");

	igc_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage.
		 * If this is the case, it is due to an improper exit of the
		 * application. So force the release of the faulty lock.
		 */
		mask = IGC_SWFW_PHY0_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
				hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = IGC_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");

		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return IGC_SUCCESS;
}
/*
 * free all rx/tx queues.
 */
static void
igc_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_igc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_igc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}
static int
eth_igc_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	int retry = 0;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!adapter->stopped)
		eth_igc_stop(dev);

	igc_flow_flush(dev, NULL);
	igc_clear_all_filter(dev);

	igc_intr_other_disable(dev);
	do {
		int ret = rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
			break;

		PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
		DELAY(200 * 1000); /* delay 200ms */
	} while (retry++ < 5);

	igc_phy_hw_reset(hw);
	igc_hw_control_release(hw);
	igc_dev_free_queues(dev);

	/* Reset any pending lock */
	igc_reset_swfw_lock(hw);

	return 0;
}
static void
igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
}
static int
eth_igc_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	int i, error = 0;

	PMD_INIT_FUNC_TRACE();
	dev->dev_ops = &eth_igc_ops;
	dev->rx_descriptor_done = eth_igc_rx_descriptor_done;
	dev->rx_queue_count = eth_igc_rx_queue_count;
	dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
	dev->tx_descriptor_status = eth_igc_tx_descriptor_status;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(dev, pci_dev);

	hw->back = pci_dev;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igc_identify_hardware(dev, pci_dev);
	if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	igc_get_bus_info(hw);

	/* Reset any pending lock */
	if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = igc_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (igc_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc("igc",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
			RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Now initialize the hardware */
	if (igc_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}

	hw->mac.get_link_status = 1;

	/* Indicate SOL/IDER usage */
	if (igc_check_reset_block(hw) < 0)
		PMD_INIT_LOG(ERR,
			"PHY reset is blocked due to SOL/IDER session.");

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
			dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
			eth_igc_interrupt_handler, (void *)dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igc_intr_other_enable(dev);

	/* initiate queue status */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		igc->txq_stats_map[i] = -1;
		igc->rxq_stats_map[i] = -1;
	}

	igc_flow_init(dev);
	igc_clear_all_filter(dev);
	return 0;

err_late:
	igc_hw_control_release(hw);
	return error;
}
static int
eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	eth_igc_close(eth_dev);
	return 0;
}

static int
eth_igc_reset(struct rte_eth_dev *dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = eth_igc_dev_uninit(dev);
	if (ret)
		return ret;

	return eth_igc_dev_init(dev);
}
static int
eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= IGC_RCTL_MPE;
	else
		rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= IGC_RCTL_MPE;
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return 0;	/* must remain in all_multicast mode */

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}
static int
eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
		size_t fw_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_fw_version fw;
	int ret;

	igc_get_fw_version(hw, &fw);

	/* if option rom is valid, display its version too */
	if (fw.or_valid) {
		ret = snprintf(fw_version, fw_size,
			"%d.%d, 0x%08x, %d.%d.%d",
			fw.eep_major, fw.eep_minor, fw.etrack_id,
			fw.or_major, fw.or_build, fw.or_patch);
	/* no option rom */
	} else {
		if (fw.etrack_id != 0x0000) {
			ret = snprintf(fw_version, fw_size,
				"%d.%d, 0x%08x",
				fw.eep_major, fw.eep_minor,
				fw.etrack_id);
		} else {
			ret = snprintf(fw_version, fw_size,
				"%d.%d.%d",
				fw.eep_major, fw.eep_minor,
				fw.eep_build);
		}
	}

	ret += 1; /* add the size of '\0' */
	if (fw_size < (u32)ret)
		return ret;
	else
		return 0;
}
static int
eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_vmdq_pools = 0;

	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGC_DEFAULT_RX_PTHRESH,
			.hthresh = IGC_DEFAULT_RX_HTHRESH,
			.wthresh = IGC_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGC_DEFAULT_TX_PTHRESH,
			.hthresh = IGC_DEFAULT_TX_HTHRESH,
			.wthresh = IGC_DEFAULT_TX_WTHRESH,
		},
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;

	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	return 0;
}
static int
eth_igc_led_on(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_igc_led_off(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}
static const uint32_t *
eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rx_desc_pkt_info_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}
static int
eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
	uint32_t rctl;

	/* if extended VLAN has been enabled */
	if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
		frame_size += VLAN_TAG_SIZE;
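	/*
	 * Example (editorial note): with the 22-byte IGC_ETH_OVERHEAD, an
	 * MTU of 1500 gives a 1522-byte frame, or 1526 bytes once the
	 * extended (outer) VLAN tag is also counted.
	 */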
	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU ||
		frame_size > MAX_RX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	/*
	 * refuse mtu that requires the support of scattered packets when
	 * this feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
		frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	rctl = IGC_READ_REG(hw, IGC_RCTL);

	/* switch to jumbo mode if needed */
	if (mtu > RTE_ETHER_MTU) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl |= IGC_RCTL_LPE;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl &= ~IGC_RCTL_LPE;
	}
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	IGC_WRITE_REG(hw, IGC_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

	return 0;
}
static int
eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_rar_set(hw, mac_addr->addr_bytes, index);
	RTE_SET_USED(pool);
	return 0;
}

static void
eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[RTE_ETHER_ADDR_LEN];
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	memset(addr, 0, sizeof(addr));
	igc_rar_set(hw, addr, index);
}

static int
eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
			struct rte_ether_addr *addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_rar_set(hw, addr->addr_bytes, 0);
	return 0;
}

static int
eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
			struct rte_ether_addr *mc_addr_set,
			uint32_t nb_mc_addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}
/*
 * Read hardware registers
 */
static void
igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)
{
	int pause_frames;

	uint64_t old_gprc = stats->gprc;
	uint64_t old_gptc = stats->gptc;
	uint64_t old_tpr = stats->tpr;
	uint64_t old_tpt = stats->tpt;
	uint64_t old_rpthc = stats->rpthc;
	uint64_t old_hgptc = stats->hgptc;

	stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);
	stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
	stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
	stats->mpc += IGC_READ_REG(hw, IGC_MPC);
	stats->scc += IGC_READ_REG(hw, IGC_SCC);
	stats->ecol += IGC_READ_REG(hw, IGC_ECOL);

	stats->mcc += IGC_READ_REG(hw, IGC_MCC);
	stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
	stats->colc += IGC_READ_REG(hw, IGC_COLC);

	stats->dc += IGC_READ_REG(hw, IGC_DC);
	stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
	stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
	stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
	stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
	stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);
	stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);
	stats->prc64 += IGC_READ_REG(hw, IGC_PRC64);
	stats->prc127 += IGC_READ_REG(hw, IGC_PRC127);
	stats->prc255 += IGC_READ_REG(hw, IGC_PRC255);
	stats->prc511 += IGC_READ_REG(hw, IGC_PRC511);
	stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);
	stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);
	stats->gprc += IGC_READ_REG(hw, IGC_GPRC);
	stats->bprc += IGC_READ_REG(hw, IGC_BPRC);
	stats->mprc += IGC_READ_REG(hw, IGC_MPRC);
	stats->gptc += IGC_READ_REG(hw, IGC_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	/* Workaround CRC bytes included in size, take away 4 bytes/packet */
	stats->gorc += IGC_READ_REG(hw, IGC_GORCL);
	stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);
	stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
	stats->gotc += IGC_READ_REG(hw, IGC_GOTCL);
	stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);
	stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;

	stats->rnbc += IGC_READ_REG(hw, IGC_RNBC);
	stats->ruc += IGC_READ_REG(hw, IGC_RUC);
	stats->rfc += IGC_READ_REG(hw, IGC_RFC);
	stats->roc += IGC_READ_REG(hw, IGC_ROC);
	stats->rjc += IGC_READ_REG(hw, IGC_RJC);

	stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);
	stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);
	stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);
	stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);
	stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);
	stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);
	stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);

	stats->tpr += IGC_READ_REG(hw, IGC_TPR);
	stats->tpt += IGC_READ_REG(hw, IGC_TPT);

	stats->tor += IGC_READ_REG(hw, IGC_TORL);
	stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);
	stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
	stats->tot += IGC_READ_REG(hw, IGC_TOTL);
	stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);
	stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;

	stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);
	stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);
	stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);
	stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);
	stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);
	stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);
	stats->mptc += IGC_READ_REG(hw, IGC_MPTC);
	stats->bptc += IGC_READ_REG(hw, IGC_BPTC);
	stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);

	stats->iac += IGC_READ_REG(hw, IGC_IAC);
	stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);
	stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);
	stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);

	/* Host to Card Statistics */
	stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);
	stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);
	stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
	stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);
	stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);
	stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
	stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);
}
/*
 * Write 0 to all queue status registers
 */
static void
igc_reset_queue_stats_register(struct igc_hw *hw)
{
	int i;

	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGORC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);
		IGC_WRITE_REG(hw, IGC_RQDPC(i), 0);
		IGC_WRITE_REG(hw, IGC_TQDPC(i), 0);
	}
}
1793 igc_read_queue_stats_register(struct rte_eth_dev *dev)
1795 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1796 struct igc_hw_queue_stats *queue_stats =
1797 IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1801 * This register is not cleared on read. Furthermore, the register wraps
1802 * around back to 0x00000000 on the next increment when reaching a value
1803 * of 0xFFFFFFFF and then continues normal count operation.
1805 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		/*
		 * Read the register first; if the value is smaller than the
		 * previous read, the register has overflowed, so increment
		 * the high 4 bytes by 1 and replace the low 4 bytes with the
		 * new value.
		 */
		tmp = IGC_READ_REG(hw, IGC_PQGPRC(i));
		value.ddword = queue_stats->pqgprc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgprc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_PQGPTC(i));
		value.ddword = queue_stats->pqgptc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgptc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_PQGORC(i));
		value.ddword = queue_stats->pqgorc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgorc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_PQGOTC(i));
		value.ddword = queue_stats->pqgotc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgotc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_PQMPRC(i));
		value.ddword = queue_stats->pqmprc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqmprc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_RQDPC(i));
		value.ddword = queue_stats->rqdpc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->rqdpc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_TQDPC(i));
		value.ddword = queue_stats->tqdpc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->tqdpc[i] = value.ddword;
	}
}
static int
eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
	struct igc_hw_queue_stats *queue_stats =
			IGC_DEV_PRIVATE_QUEUE_STATS(dev);
	int i;

	/*
	 * Cancel status handler since it will read the queue status registers
	 */
	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* Read status register */
	igc_read_queue_stats_register(dev);
	igc_read_stats_registers(hw, stats);

	if (rte_stats == NULL) {
		/* Restart queue status handler */
		rte_eal_alarm_set(IGC_ALARM_INTERVAL,
				igc_update_queue_stats_handler, dev);
		return -EINVAL;
	}

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs +
			stats->rlec + stats->ruc + stats->roc +
			stats->rxerrc + stats->algnerrc;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes = stats->gorc;
	rte_stats->obytes = stats->gotc;

	/* Get per-queue statuses */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		/* GET TX queue statuses */
		int map_id = igc->txq_stats_map[i];
		if (map_id >= 0) {
			rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
			rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
		}
		/* Get RX queue statuses */
		map_id = igc->rxq_stats_map[i];
		if (map_id >= 0) {
			rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
			rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
			rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
		}
	}

	/* Restart queue status handler */
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
	return 0;
}
static int
eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats =
			IGC_DEV_PRIVATE_STATS(dev);
	unsigned int i;

	igc_read_stats_registers(hw, hw_stats);

	if (n < IGC_NB_XSTATS)
		return IGC_NB_XSTATS;

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	if (!xstats)
		return 0;

	/* Extended stats */
	for (i = 0; i < IGC_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_igc_stats_strings[i].offset);
	}

	return IGC_NB_XSTATS;
}
static int
eth_igc_xstats_reset(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
	struct igc_hw_queue_stats *queue_stats =
			IGC_DEV_PRIVATE_QUEUE_STATS(dev);

	/* Cancel queue status handler to avoid conflicts */
	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* HW registers are cleared on read */
	igc_reset_queue_stats_register(hw);
	igc_read_stats_registers(hw, hw_stats);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));
	memset(queue_stats, 0, sizeof(*queue_stats));

	/* Restart the queue status handler */
	rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
			dev);

	return 0;
}
static int
eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int size)
{
	unsigned int i;

	if (xstats_names == NULL)
		return IGC_NB_XSTATS;

	if (size < IGC_NB_XSTATS) {
		PMD_DRV_LOG(ERR, "not enough buffers!");
		return IGC_NB_XSTATS;
	}

	for (i = 0; i < IGC_NB_XSTATS; i++)
		strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
			sizeof(xstats_names[i].name));

	return IGC_NB_XSTATS;
}
static int
eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
		unsigned int limit)
{
	unsigned int i;

	if (!ids)
		return eth_igc_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= IGC_NB_XSTATS) {
			PMD_DRV_LOG(ERR, "id value isn't valid");
			return -EINVAL;
		}
		strlcpy(xstats_names[i].name,
			rte_igc_stats_strings[ids[i]].name,
			sizeof(xstats_names[i].name));
	}
	return limit;
}
static int
eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
	unsigned int i;

	igc_read_stats_registers(hw, hw_stats);

	if (!ids) {
		if (n < IGC_NB_XSTATS)
			return IGC_NB_XSTATS;

		/* If this is a reset xstats is NULL, and we have cleared the
		 * registers by reading them.
		 */
		if (!values)
			return 0;

		/* Extended stats */
		for (i = 0; i < IGC_NB_XSTATS; i++)
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igc_stats_strings[i].offset);

		return IGC_NB_XSTATS;

	} else {
		for (i = 0; i < n; i++) {
			if (ids[i] >= IGC_NB_XSTATS) {
				PMD_DRV_LOG(ERR, "id value isn't valid");
				return -EINVAL;
			}
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igc_stats_strings[ids[i]].offset);
		}
		return n;
	}
}
static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
{
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);

	/* check that the queue id is valid */
	if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
		PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
			queue_id, IGC_QUEUE_PAIRS_NUM - 1);
		return -EINVAL;
	}

	/* store the mapping status id */
	if (is_rx)
		igc->rxq_stats_map[queue_id] = stat_idx;
	else
		igc->txq_stats_map[queue_id] = stat_idx;

	return 0;
}
2094 eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
2096 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2097 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2098 struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
2099 uint32_t vec = IGC_MISC_VEC_ID;
2101 if (rte_intr_allow_others(intr_handle))
2102 vec = IGC_RX_VEC_START;
2104 uint32_t mask = 1u << (queue_id + vec);
2106 IGC_WRITE_REG(hw, IGC_EIMC, mask);
2107 IGC_WRITE_FLUSH(hw);
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMS, mask);
	IGC_WRITE_FLUSH(hw);

	rte_intr_enable(intr_handle);

	return 0;
}

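/*
 * Note on the vector arithmetic above (an illustrative sketch, assuming
 * RTE_INTR_VEC_RXTX_OFFSET is 1): when the interrupt handle supports
 * separate vectors ("allow others"), vector 0 (IGC_MISC_VEC_ID) is kept for
 * link/misc events and Rx queues start at IGC_RX_VEC_START, so queue 1 maps
 * to EIMS/EIMC bit (1 + 1) == bit 2. An application arms a queue interrupt
 * through the generic API, e.g. for port 0, queue 1 (example values):
 *
 *	rte_eth_dev_rx_intr_enable(0, 1);
 */
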
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl;
	int rx_pause, tx_pause;

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return the rx_pause and tx_pause status according to the actual
	 * setting of the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = IGC_READ_REG(hw, IGC_CTRL);
	tx_pause = (ctrl & IGC_CTRL_TFCE) ? 1 : 0;
	rx_pause = (ctrl & IGC_CTRL_RFCE) ? 1 : 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}

static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;
	int err;

	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;

	rx_buf_size = igc_get_rx_buffer_size(hw);
	PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* Reserve at least one Ethernet frame for the watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if (fc_conf->high_water > max_high_water ||
		fc_conf->high_water < fc_conf->low_water) {
		PMD_DRV_LOG(ERR,
			"Incorrect high(%u)/low(%u) water value, max is %u",
			fc_conf->high_water, fc_conf->low_water,
			max_high_water);
		return -EINVAL;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		hw->fc.requested_mode = igc_fc_none;
		break;
	case RTE_FC_RX_PAUSE:
		hw->fc.requested_mode = igc_fc_rx_pause;
		break;
	case RTE_FC_TX_PAUSE:
		hw->fc.requested_mode = igc_fc_tx_pause;
		break;
	case RTE_FC_FULL:
		hw->fc.requested_mode = igc_fc_full;
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
		return -EINVAL;
	}

	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = igc_setup_link_generic(hw);
	if (err == IGC_SUCCESS) {
		/*
		 * Check if we want to forward MAC control frames - the driver
		 * doesn't have a native capability to do that, so we write
		 * the registers ourselves.
		 */
		rctl = IGC_READ_REG(hw, IGC_RCTL);

		/* set or clear the PMCF bit depending on the configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= IGC_RCTL_PMCF;
		else
			rctl &= ~IGC_RCTL_PMCF;

		IGC_WRITE_REG(hw, IGC_RCTL, rctl);
		IGC_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
	return -EIO;
}

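/*
 * Illustrative usage sketch (not part of the driver): enabling full flow
 * control from an application. The watermarks are example assumptions and
 * must satisfy low_water <= high_water <= rx_buf_size - RTE_ETHER_MAX_LEN,
 * as checked above. Port id 0 is also an assumption.
 *
 *	struct rte_eth_fc_conf fc;
 *	memset(&fc, 0, sizeof(fc));
 *	rte_eth_dev_flow_ctrl_get(0, &fc);
 *	fc.mode = RTE_FC_FULL;
 *	fc.pause_time = IGC_FC_PAUSE_TIME;
 *	fc.high_water = 1536;
 *	fc.low_water = 768;
 *	rte_eth_dev_flow_ctrl_set(0, &fc);
 */
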
static int
eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured (%d) doesn't match the number the hardware can support (%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* set the redirection table */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta, reg;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* skip if there is no need to update the register */
		if (!mask ||
		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* check whether the old register value must be read first */
		if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
			reg.dword = 0;
		else
			reg.dword = IGC_READ_REG_LE_VALUE(hw,
					IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));

		/* update the register */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta.bytes[j] =
					(uint8_t)reta_conf[idx].reta[shift + j];
			else
				reta.bytes[j] = reg.bytes[j];
		}
		IGC_WRITE_REG_LE_VALUE(hw,
			IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
	}

	return 0;
}

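/*
 * Worked example for the index arithmetic above (illustrative, assuming
 * IGC_RSS_RDT_REG_SIZE == 4 and RTE_RETA_GROUP_SIZE == 64): RETA entry 68
 * lives in reta_conf[1] (68 / 64) at offset 4 (68 % 64), and in hardware
 * register IGC_RETA(17) (68 / 4). Each 32-bit RETA register packs four
 * one-byte queue indices, which is why the loop advances four entries at a
 * time and merges unmasked bytes from the old register value before writing
 * it back.
 */
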
static int
eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured (%d) doesn't match the number the hardware can support (%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* read the redirection table */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* skip if there is no need to read the register */
		if (!mask ||
		    shift > (RTE_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* read the register and extract the queue indices */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		reta.dword = IGC_READ_REG_LE_VALUE(hw,
				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta_conf[idx].reta[shift + j] = reta.bytes[j];
		}
	}

	return 0;
}

static int
eth_igc_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_hw_rss_hash_set(hw, rss_conf);
	return 0;
}

static int
eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t rss_hf;

	if (hash_key != NULL) {
		int i;

		/* if there is not enough space to store the hash key */
		if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
			PMD_DRV_LOG(ERR,
				"RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
				rss_conf->rss_key_len, IGC_HKEY_SIZE);
			return -EINVAL;
		}

		/* read the RSS key from the registers */
		for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
			hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
	}

	/* get the RSS functions configured in the MRQC register */
	mrqc = IGC_READ_REG(hw, IGC_MRQC);
	if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
		return 0;

	rss_hf = 0;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
		rss_hf |= ETH_RSS_IPV4;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
		rss_hf |= ETH_RSS_IPV6;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
		rss_hf |= ETH_RSS_IPV6_UDP_EX;

	rss_conf->rss_hf |= rss_hf;
	return 0;
}

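/*
 * Illustrative usage sketch (not part of the driver): querying the active
 * hash functions and key. The 40-byte buffer is an assumption matching
 * IGC_HKEY_SIZE (IGC_HKEY_MAX_INDEX 32-bit RSSRK registers), which the
 * getter above insists on; port 0 is also an example assumption.
 *
 *	uint8_t key[40];
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = key,
 *		.rss_key_len = sizeof(key),
 *	};
 *	rte_eth_dev_rss_hash_conf_get(0, &conf);
 */
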
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx, vid_bit;

	vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
	vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
	vfta = shadow_vfta->vfta[vid_idx];
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);

	/* update the local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

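/*
 * Worked example for the VFTA addressing above (illustrative): the VLAN
 * filter table is an array of 32-bit registers with one bit per VLAN id, so
 * vlan_id 100 selects register 100 >> 5 == 3 and bit 100 & 0x1F == 4.
 * An application toggles a filter through the generic API (example values,
 * port 0, VLAN 100, on):
 *
 *	rte_eth_dev_vlan_filter(0, 100, 1);
 */
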
static void
igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_read_reg_check_clear_bits(hw, IGC_RCTL,
			IGC_RCTL_CFIEN | IGC_RCTL_VFE);
}

static void
igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t reg_val;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg_val = IGC_READ_REG(hw, IGC_RCTL);
	reg_val &= ~IGC_RCTL_CFIEN;
	reg_val |= IGC_RCTL_VFE;
	IGC_WRITE_REG(hw, IGC_RCTL, reg_val);

	/* restore the VFTA table */
	for (i = 0; i < IGC_VFTA_SIZE; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
}

static void
igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static void
igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static int
igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* nothing to do if extended VLAN hasn't been enabled */
	if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
		return 0;

	if ((dev->data->dev_conf.rxmode.offloads &
		DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
		goto write_ext_vlan;

	/* Update the maximum packet length */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <
		RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
			dev->data->dev_conf.rxmode.max_rx_pkt_len,
			VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
		return -EINVAL;
	}
	dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE;
	IGC_WRITE_REG(hw, IGC_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

write_ext_vlan:
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

static int
igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* nothing to do if extended VLAN has already been enabled */
	if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
		return 0;

	if ((dev->data->dev_conf.rxmode.offloads &
		DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
		goto write_ext_vlan;

	/* Update the maximum packet length */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
		MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
			VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE;
	IGC_WRITE_REG(hw, IGC_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

write_ext_vlan:
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

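/*
 * Note on the +/- VLAN_TAG_SIZE adjustments above (illustrative arithmetic):
 * with extended (QinQ) VLAN enabled a frame carries two tags, so the on-wire
 * limit written to IGC_RLPML must grow by one extra 4-byte tag; e.g. a
 * 9000-byte max_rx_pkt_len becomes 9004. Disabling takes the same 4 bytes
 * back off.
 */
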
static int
eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			igc_vlan_hw_strip_enable(dev);
		else
			igc_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			igc_vlan_hw_filter_enable(dev);
		else
			igc_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			return igc_vlan_hw_extend_enable(dev);
		else
			return igc_vlan_hw_extend_disable(dev);
	}

	return 0;
}

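/*
 * Illustrative usage sketch (not part of the driver): an application asks
 * the ethdev layer to (re)program VLAN offloads; the layer updates
 * rxmode->offloads and calls into the handler above with a mask of the
 * settings that changed. Example values (port 0, enable strip and filter):
 *
 *	rte_eth_dev_set_vlan_offload(0,
 *			ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD);
 */
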
static int
eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type,
		uint16_t tpid)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t reg_val;

	/* only the outer TPID of a double VLAN can be configured */
	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
		reg_val = IGC_READ_REG(hw, IGC_VET);
		reg_val = (reg_val & ~IGC_VET_EXT) |
			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
		IGC_WRITE_REG(hw, IGC_VET, reg_val);
		return 0;
	}

	/* all other TPID values are read-only */
	PMD_DRV_LOG(ERR, "Not supported");
	return -ENOTSUP;
}

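/*
 * Illustrative usage sketch (not part of the driver): setting the outer
 * TPID to 0x88a8 (IEEE 802.1ad S-tag) on port 0; as the handler above shows,
 * only ETH_VLAN_TYPE_OUTER is writable for this device.
 *
 *	rte_eth_dev_set_vlan_ether_type(0, ETH_VLAN_TYPE_OUTER, 0x88a8);
 */
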
static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct igc_adapter), eth_igc_dev_init);
}

static int
eth_igc_pci_remove(struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
}

static struct rte_pci_driver rte_igc_pmd = {
	.id_table = pci_id_igc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igc_pci_probe,
	.remove = eth_igc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");
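
/*
 * Illustrative note (not part of the driver): with the registrations above,
 * EAL matches any device listed in pci_id_igc_map at rte_eal_init() time,
 * and the kmod string advertises that the device must first be bound to
 * igb_uio, uio_pci_generic or vfio-pci, e.g. (example PCI address):
 *
 *	dpdk-devbind.py --bind=vfio-pci 0000:01:00.0
 */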