/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */

#include <rte_string_fns.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <ethdev_driver.h>
#include <ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_alarm.h>

#include "igc_logs.h"
#include "igc_txrx.h"
#include "igc_filter.h"
#include "igc_flow.h"

#define IGC_INTEL_VENDOR_ID 0x8086

#define IGC_FC_PAUSE_TIME 0x0680
#define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
#define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */

#define IGC_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define IGC_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
#define IGC_MSIX_OTHER_INTR_VEC 0 /* MSI-X other interrupt vector */
#define IGC_FLAG_NEED_LINK_UPDATE (1u << 0) /* need to update link */

#define IGC_DEFAULT_RX_FREE_THRESH 32

#define IGC_DEFAULT_RX_PTHRESH 8
#define IGC_DEFAULT_RX_HTHRESH 8
#define IGC_DEFAULT_RX_WTHRESH 4

#define IGC_DEFAULT_TX_PTHRESH 8
#define IGC_DEFAULT_TX_HTHRESH 1
#define IGC_DEFAULT_TX_WTHRESH 16

/* External VLAN Enable bit mask */
#define IGC_CTRL_EXT_EXT_VLAN (1u << 26)

/* Speed select */
#define IGC_CTRL_SPEED_MASK (7u << 8)
#define IGC_CTRL_SPEED_2500 (6u << 8)

/* External VLAN Ether Type bit mask and shift */
#define IGC_VET_EXT 0xFFFF0000
#define IGC_VET_EXT_SHIFT 16

/* Force EEE Auto-negotiation */
#define IGC_EEER_EEE_FRC_AN (1u << 28)

/* Per Queue Good Packets Received Count */
#define IGC_PQGPRC(idx) (0x10010 + 0x100 * (idx))
/* Per Queue Good Octets Received Count */
#define IGC_PQGORC(idx) (0x10018 + 0x100 * (idx))
/* Per Queue Good Octets Transmitted Count */
#define IGC_PQGOTC(idx) (0x10034 + 0x100 * (idx))
/* Per Queue Multicast Packets Received Count */
#define IGC_PQMPRC(idx) (0x10038 + 0x100 * (idx))
/* Transmit Queue Drop Packet Count */
#define IGC_TQDPC(idx) (0xe030 + 0x40 * (idx))
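
/*
 * Example (illustrative): each Rx queue's counter bank is 0x100 bytes
 * apart, so IGC_PQGPRC(0) = 0x10010 and IGC_PQGPRC(1) = 0x10110, while
 * the Tx drop counters are 0x40 apart: IGC_TQDPC(3) = 0xe030 + 0xc0 = 0xe0f0.
 */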

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define U32_0_IN_U64 0 /* lower bytes of u64 */
#define U32_1_IN_U64 1 /* higher bytes of u64 */
#else
#define U32_0_IN_U64 1
#define U32_1_IN_U64 0
#endif
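
/*
 * Example (illustrative): given a union of u64 ddword and u32 dword[2]
 * holding ddword = 0x1122334455667788, dword[U32_0_IN_U64] is
 * 0x55667788 and dword[U32_1_IN_U64] is 0x11223344 on either endianness.
 */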

#define IGC_ALARM_INTERVAL 8000000u
/* microseconds; some per-queue registers wrap around to 0 after about 13.6s. */
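
/*
 * Where the ~13.6s figure comes from (illustrative, assuming the 32-bit
 * per-queue octet counters at the 2.5 Gbps line rate): the counters
 * advance by roughly 312.5 MB/s, so they can wrap in as little as
 * 2^32 / 312.5e6 ~= 13.7 seconds; polling every 8 seconds guarantees at
 * most one wrap between two reads.
 */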

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IGC_MAX_RXD,
	.nb_min = IGC_MIN_RXD,
	.nb_align = IGC_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IGC_MAX_TXD,
	.nb_min = IGC_MIN_TXD,
	.nb_align = IGC_TXD_ALIGN,
	.nb_seg_max = IGC_TX_MAX_SEG,
	.nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,
};

static const struct rte_pci_id pci_id_igc_map[] = {
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K) },
	{ .vendor_id = 0, /* sentinel */ },
};

/* store statistics names and their offsets in the stats structure */
struct rte_igc_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)},
	{"rx_errors", offsetof(struct igc_hw_stats, rxerrc)},
	{"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct igc_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct igc_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)},
	{"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)},
	{"rx_length_errors", offsetof(struct igc_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)},
	{"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)},
	{"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct igc_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct igc_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct igc_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct igc_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)},
	{"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)},
	{"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)},
	{"rx_descriptor_lower_threshold",
		offsetof(struct igc_hw_stats, icrxdmtc)},
};

#define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \
		sizeof(rte_igc_stats_strings[0]))

static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static int eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static int eth_igc_set_link_up(struct rte_eth_dev *dev);
static int eth_igc_set_link_down(struct rte_eth_dev *dev);
static int eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
		char *fw_version, size_t fw_size);
static int eth_igc_infos_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int eth_igc_led_on(struct rte_eth_dev *dev);
static int eth_igc_led_off(struct rte_eth_dev *dev);
static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igc_rar_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);
static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int eth_igc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *rte_stats);
static int eth_igc_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned int n);
static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
		unsigned int limit);
static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type, uint16_t tpid);

static const struct eth_dev_ops eth_igc_ops = {
	.dev_configure = eth_igc_configure,
	.link_update = eth_igc_link_update,
	.dev_stop = eth_igc_stop,
	.dev_start = eth_igc_start,
	.dev_close = eth_igc_close,
	.dev_reset = eth_igc_reset,
	.dev_set_link_up = eth_igc_set_link_up,
	.dev_set_link_down = eth_igc_set_link_down,
	.promiscuous_enable = eth_igc_promiscuous_enable,
	.promiscuous_disable = eth_igc_promiscuous_disable,
	.allmulticast_enable = eth_igc_allmulticast_enable,
	.allmulticast_disable = eth_igc_allmulticast_disable,
	.fw_version_get = eth_igc_fw_version_get,
	.dev_infos_get = eth_igc_infos_get,
	.dev_led_on = eth_igc_led_on,
	.dev_led_off = eth_igc_led_off,
	.dev_supported_ptypes_get = eth_igc_supported_ptypes_get,
	.mtu_set = eth_igc_mtu_set,
	.mac_addr_add = eth_igc_rar_set,
	.mac_addr_remove = eth_igc_rar_clear,
	.mac_addr_set = eth_igc_default_mac_addr_set,
	.set_mc_addr_list = eth_igc_set_mc_addr_list,

	.rx_queue_setup = eth_igc_rx_queue_setup,
	.rx_queue_release = eth_igc_rx_queue_release,
	.tx_queue_setup = eth_igc_tx_queue_setup,
	.tx_queue_release = eth_igc_tx_queue_release,
	.tx_done_cleanup = eth_igc_tx_done_cleanup,
	.rxq_info_get = eth_igc_rxq_info_get,
	.txq_info_get = eth_igc_txq_info_get,
	.stats_get = eth_igc_stats_get,
	.xstats_get = eth_igc_xstats_get,
	.xstats_get_by_id = eth_igc_xstats_get_by_id,
	.xstats_get_names_by_id = eth_igc_xstats_get_names_by_id,
	.xstats_get_names = eth_igc_xstats_get_names,
	.stats_reset = eth_igc_xstats_reset,
	.xstats_reset = eth_igc_xstats_reset,
	.queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,
	.rx_queue_intr_enable = eth_igc_rx_queue_intr_enable,
	.rx_queue_intr_disable = eth_igc_rx_queue_intr_disable,
	.flow_ctrl_get = eth_igc_flow_ctrl_get,
	.flow_ctrl_set = eth_igc_flow_ctrl_set,
	.reta_update = eth_igc_rss_reta_update,
	.reta_query = eth_igc_rss_reta_query,
	.rss_hash_update = eth_igc_rss_hash_update,
	.rss_hash_conf_get = eth_igc_rss_hash_conf_get,
	.vlan_filter_set = eth_igc_vlan_filter_set,
	.vlan_offload_set = eth_igc_vlan_offload_set,
	.vlan_tpid_set = eth_igc_vlan_tpid_set,
	.vlan_strip_queue_set = eth_igc_vlan_strip_queue_set,
	.flow_ops_get = eth_igc_flow_ops_get,
};

/*
 * multiple queue mode checking
 */
static int
igc_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		PMD_INIT_LOG(ERR, "SRIOV is not supported.");
		return -EINVAL;
	}

	if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
		rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
		/* RSS together with VMDq not supported */
		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				rx_mq_mode);
		return -EINVAL;
	}

	/* To avoid breaking software that sets an invalid mode, only
	 * display a warning if the mode is not supported.
	 */
	if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
		PMD_INIT_LOG(WARNING,
			"TX mode %d is not supported; it has no effect in this driver and is ignored",
			tx_mq_mode);

	return 0;
}

static int
eth_igc_configure(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

	ret = igc_check_mq_mode(dev);
	if (ret != 0)
		return ret;

	intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
	return 0;
}

static int
eth_igc_set_link_up(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_up_phy(hw);
	else
		igc_power_up_fiber_serdes_link(hw);
	return 0;
}

static int
eth_igc_set_link_down(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_down_phy(hw);
	else
		igc_shutdown_fiber_serdes_link(hw);
	return 0;
}

/*
 * disable other interrupts
 */
static void
igc_intr_other_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMC, ~0);
}

/*
 * enable other interrupts
 */
static void
igc_intr_other_enable(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
}

/*
 * Read ICR to get the interrupt causes, check them, and set a bit flag
 * so the link status gets updated when a link status change is signalled.
 */
static void
eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	/* read-on-clear nic registers here */
	icr = IGC_READ_REG(hw, IGC_ICR);

	intr->flags = 0;
	if (icr & IGC_ICR_LSC)
		intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
}

/* return 0 if the link status changed, -1 if it did not */
static int
eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_eth_link link;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case igc_media_type_copper:
			/* Do the work to read phy */
			igc_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case igc_media_type_fiber:
			igc_check_for_link(hw);
			link_check = (IGC_READ_REG(hw, IGC_STATUS) &
					IGC_STATUS_LU);
			break;

		case igc_media_type_internal_serdes:
			igc_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_check) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				RTE_ETH_LINK_FULL_DUPLEX :
				RTE_ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = RTE_ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				RTE_ETH_LINK_SPEED_FIXED);

		if (speed == SPEED_2500) {
			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
			if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
				tipg &= ~IGC_TIPG_IPGT_MASK;
				tipg |= 0x0b;
				IGC_WRITE_REG(hw, IGC_TIPG, tipg);
			}
		}
	} else {
		link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
		link.link_status = RTE_ETH_LINK_DOWN;
		link.link_autoneg = RTE_ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}

/*
 * It executes link_update after knowing an interrupt is present.
 */
static void
eth_igc_interrupt_action(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	int ret;

	if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		ret = eth_igc_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return;

		rte_eth_linkstatus_get(dev, &link);
		if (link.link_status)
			PMD_DRV_LOG(INFO,
				" Port %d: Link Up - speed %u Mbps - %s",
				dev->data->port_id,
				(unsigned int)link.link_speed,
				link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
		else
			PMD_DRV_LOG(INFO, " Port %d: Link Down",
				dev->data->port_id);

		PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
			pci_dev->addr.domain,
			pci_dev->addr.bus,
			pci_dev->addr.devid,
			pci_dev->addr.function);
		rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
	}
}

/*
 * Interrupt handler which shall be registered at first.
 *
 * @handle
 *  Pointer to interrupt handle.
 * @param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
eth_igc_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igc_interrupt_get_status(dev);
	eth_igc_interrupt_action(dev);
}

static void igc_read_queue_stats_register(struct rte_eth_dev *dev);

/*
 * Update the queue status every IGC_ALARM_INTERVAL microseconds.
 * EAL alarms are single-shot, so the handler re-arms itself.
 * @param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
igc_update_queue_stats_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	igc_read_queue_stats_register(dev);
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
}

/*
 * rx,tx enable/disable
 */
static void
eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t tctl, rctl;

	tctl = IGC_READ_REG(hw, IGC_TCTL);
	rctl = IGC_READ_REG(hw, IGC_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= IGC_TCTL_EN;
		rctl |= IGC_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~IGC_TCTL_EN;
		rctl &= ~IGC_RCTL_EN;
	}
	IGC_WRITE_REG(hw, IGC_TCTL, tctl);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);
}

/*
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static int
eth_igc_stop(struct rte_eth_dev *dev)
{
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct rte_eth_link link;

	dev->data->dev_started = 0;
	adapter->stopped = 1;

	/* disable receive and transmit */
	eth_igc_rxtx_control(dev, false);

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	igc_intr_other_disable(dev);

	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* disable checking EEE operation in MAC loopback mode */
	igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);

	/* Set bit for Go Link disconnect */
	igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
			IGC_82580_PM_GO_LINKD);

	/* Power down the phy. Needed to make the link go down */
	eth_igc_set_link_down(dev);

	igc_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	rte_intr_vec_list_free(intr_handle);

	return 0;
}

/*
 * write interrupt vector allocation register
 * @hw
 *  board private structure
 * @queue_index
 *  queue index, valid 0,1,2,3
 * @tx
 *  tx:1, rx:0
 * @msix_vector
 *  msix-vector, valid 0,1,2,3,4
 */
static void
igc_write_ivar(struct igc_hw *hw, uint8_t queue_index,
		bool tx, uint8_t msix_vector)
{
	uint8_t offset = 0;
	uint8_t reg_index = queue_index >> 1;
	uint32_t val;

	/*
	 * IVAR(0)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX1		RX1		TX0		RX0
	 *
	 * IVAR(1)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX3		RX3		TX2		RX2
	 */
	if (tx)
		offset = 8 + (queue_index & 1) * 16;
	else
		offset = (queue_index & 1) * 16;

	val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset;

	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val);
}
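
/*
 * Example (illustrative): igc_write_ivar(hw, 2, false, 3) targets Rx
 * queue 2, i.e. reg_index = 1 and offset = 0, so the low byte of
 * IVAR(1) becomes (3 | IGC_IVAR_VALID) and queue 2 Rx fires vector 3.
 */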

/* Sets up the hardware to generate MSI-X interrupts properly
 * @hw
 *  board private structure
 */
static void
igc_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;

	uint32_t intr_mask;
	uint32_t vec = IGC_MISC_VEC_ID;
	uint32_t base = IGC_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	int i, nb_efd;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = IGC_RX_VEC_START;
		vec = base;
		misc_shift = 1;
	}

	/* turn on MSI-X capability first */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
				IGC_GPIE_PBA | IGC_GPIE_EIAME |
				IGC_GPIE_NSICR);

	nb_efd = rte_intr_nb_efd_get(intr_handle);
	if (nb_efd < 0)
		return;

	intr_mask = RTE_LEN2MASK(nb_efd, uint32_t) << misc_shift;

	if (dev->data->dev_conf.intr_conf.lsc)
		intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC);
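
	/*
	 * Example (illustrative): with 4 Rx event fds and misc_shift = 1,
	 * RTE_LEN2MASK(4, uint32_t) = 0xf, so intr_mask = 0x1e (vectors
	 * 1-4 for the queues); enabling LSC then adds vector 0 -> 0x1f.
	 */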

	/* enable msix auto-clear */
	igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);

	/* set other cause interrupt vector */
	igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
		(uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);

	/* enable auto-mask */
	igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		igc_write_ivar(hw, i, 0, vec);
		rte_intr_vec_list_index_set(intr_handle, i, vec);
		if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
			vec++;
	}

	IGC_WRITE_FLUSH(hw);
}

/*
 * Enable or disable the link status change interrupt in the mask.
 *
 * @dev
 *  Pointer to struct rte_eth_dev.
 * @on
 *  Enable or Disable
 */
static void
igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	if (on)
		intr->mask |= IGC_ICR_LSC;
	else
		intr->mask &= ~IGC_ICR_LSC;
}

/* It enables the rx queue interrupts.
 * It will be called only once during NIC initialization.
 */
static void
igc_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t mask;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;
	int nb_efd;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	nb_efd = rte_intr_nb_efd_get(intr_handle);
	if (nb_efd < 0)
		return;

	mask = RTE_LEN2MASK(nb_efd, uint32_t) << misc_shift;
	IGC_WRITE_REG(hw, IGC_EIMS, mask);
}

/*
 * Get hardware rx-buffer size.
 */
static inline int
igc_get_rx_buffer_size(struct igc_hw *hw)
{
	return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
}
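
/*
 * Example (illustrative): RXPBS[5:0] holds the Rx packet buffer size in
 * KB, and "<< 10" converts KB to bytes; a field value of 0x20 (32 KB)
 * therefore yields 32768 bytes.
 */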

/*
 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igc_hw_control_acquire(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igc_hw_control_release(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of the hardware */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT,
			ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}

static int
igc_hardware_init(struct igc_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igc_hw_control_acquire(hw);

	/* Issue a global reset */
	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/*
	 * Hardware flow control
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 */
	rx_buf_size = igc_get_rx_buffer_size(hw);
	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
	hw->fc.requested_mode = igc_fc_full;
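
	/*
	 * Worked example (illustrative): with a 32 KB Rx buffer,
	 * high_water = 32768 - 2 * 1518 = 29732 bytes and
	 * low_water = 29732 - 1500 = 28232 bytes.
	 */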

	diag = igc_init_hw(hw);
	if (diag < 0)
		return diag;

	igc_get_phy_info(hw);
	igc_check_for_link(hw);

	return 0;
}

static int
eth_igc_start(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t *speeds;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	/* disable uio/vfio intr/eventfd mapping */
	if (!adapter->stopped)
		rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go up */
	eth_igc_set_link_up(dev);

	/* Put the address into the Receive Address Array */
	igc_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igc_hardware_init(hw)) {
		PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
			dev->data->dev_conf.intr_conf.rxq) {
		uint32_t intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle)) {
		if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
				dev->data->nb_rx_queues)) {
			PMD_DRV_LOG(ERR,
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for rx interrupt */
	igc_configure_msix_intr(dev);

	igc_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = igc_rx_init(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
		igc_dev_clear_queues(dev);
		return ret;
	}

	igc_clear_hw_cntrs_base_generic(hw);

	/* VLAN Offload Settings */
	eth_igc_vlan_offload_set(dev,
			RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
			RTE_ETH_VLAN_EXTEND_MASK);

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
		hw->mac.autoneg = 1;
	} else {
		int num_speeds = 0;

		if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
			PMD_DRV_LOG(ERR,
				"Force speed mode currently not supported");
			igc_dev_clear_queues(dev);
			return -EINVAL;
		}

		hw->phy.autoneg_advertised = 0;
		hw->mac.autoneg = 1;

		if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
				RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
				RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
			num_speeds++;
		}
		if (num_speeds == 0)
			goto error_invalid_config;
	}
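
	/*
	 * Example (illustrative): link_speeds = RTE_ETH_LINK_SPEED_1G |
	 * RTE_ETH_LINK_SPEED_2_5G yields num_speeds = 2 and advertises
	 * ADVERTISE_1000_FULL | ADVERTISE_2500_FULL to the PHY.
	 */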

	igc_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc)
			igc_lsc_interrupt_setup(dev, 1);
		else
			igc_lsc_interrupt_setup(dev, 0);
	} else {
		rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc)
			PMD_DRV_LOG(INFO,
				"LSC won't enable because of no intr multiplex");
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq &&
			rte_intr_dp_is_en(intr_handle))
		igc_rxq_interrupt_setup(dev);

	/* resume enabled intr since hw reset */
	igc_intr_other_enable(dev);

	eth_igc_rxtx_control(dev, true);
	eth_igc_link_update(dev, 0);

	/* configure MAC-loopback mode */
	if (dev->data->dev_conf.lpbk_mode == 1) {
		uint32_t reg_val;

		reg_val = IGC_READ_REG(hw, IGC_CTRL);
		reg_val &= ~IGC_CTRL_SPEED_MASK;
		reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
			IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
		IGC_WRITE_REG(hw, IGC_CTRL, reg_val);

		igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
	}

	return 0;

error_invalid_config:
	PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
			dev->data->dev_conf.link_speeds, dev->data->port_id);
	igc_dev_clear_queues(dev);
	return -EINVAL;
}

static int
igc_reset_swfw_lock(struct igc_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = igc_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (igc_get_hw_semaphore_generic(hw) < 0)
		PMD_DRV_LOG(DEBUG, "SMBI lock released");

	igc_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage.
		 * If this is the case, it is due to an improper exit of the
		 * application. So force the release of the faulty lock.
		 */
		mask = IGC_SWFW_PHY0_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
					hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = IGC_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");

		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return IGC_SUCCESS;
}

/*
 * free all rx/tx queues.
 */
static void
igc_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_igc_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_igc_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}

static int
eth_igc_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	int retry = 0;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (!adapter->stopped)
		ret = eth_igc_stop(dev);

	igc_flow_flush(dev, NULL);
	igc_clear_all_filter(dev);

	igc_intr_other_disable(dev);
	do {
		int ret = rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
			break;

		PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
		DELAY(200 * 1000); /* delay 200ms */
	} while (retry++ < 5);

	igc_phy_hw_reset(hw);
	igc_hw_control_release(hw);
	igc_dev_free_queues(dev);

	/* Reset any pending lock */
	igc_reset_swfw_lock(hw);

	return ret;
}

static void
igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
}

static int
eth_igc_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	int i, error = 0;

	PMD_INIT_FUNC_TRACE();
	dev->dev_ops = &eth_igc_ops;
	dev->rx_queue_count = eth_igc_rx_queue_count;
	dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
	dev->tx_descriptor_status = eth_igc_tx_descriptor_status;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(dev, pci_dev);
	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;

	hw->back = pci_dev;
	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igc_identify_hardware(dev, pci_dev);
	if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	igc_get_bus_info(hw);

	/* Reset any pending lock */
	if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = igc_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (igc_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc("igc",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
				RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Now initialize the hardware */
	if (igc_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}

	hw->mac.get_link_status = 1;
	igc->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (igc_check_reset_block(hw) < 0)
		PMD_INIT_LOG(ERR,
			"PHY reset is blocked due to SOL/IDER session.");

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
			dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	rte_intr_callback_register(pci_dev->intr_handle,
			eth_igc_interrupt_handler, (void *)dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(pci_dev->intr_handle);

	/* enable support intr */
	igc_intr_other_enable(dev);

	/* initiate queue status */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		igc->txq_stats_map[i] = -1;
		igc->rxq_stats_map[i] = -1;
	}

	igc_flow_init(dev);
	igc_clear_all_filter(dev);
	return 0;

err_late:
	igc_hw_control_release(hw);
	return error;
}

static int
eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	eth_igc_close(eth_dev);
	return 0;
}

static int
eth_igc_reset(struct rte_eth_dev *dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = eth_igc_dev_uninit(dev);
	if (ret)
		return ret;

	return eth_igc_dev_init(dev);
}

static int
eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= IGC_RCTL_MPE;
	else
		rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= IGC_RCTL_MPE;
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return 0; /* must remain in all_multicast mode */

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
		size_t fw_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_fw_version fw;
	int ret;

	igc_get_fw_version(hw, &fw);

	/* if option rom is valid, display its version too */
	if (fw.or_valid) {
		ret = snprintf(fw_version, fw_size,
			"%d.%d, 0x%08x, %d.%d.%d",
			fw.eep_major, fw.eep_minor, fw.etrack_id,
			fw.or_major, fw.or_build, fw.or_patch);
	/* no option rom */
	} else {
		if (fw.etrack_id != 0x0000) {
			ret = snprintf(fw_version, fw_size,
				"%d.%d, 0x%08x",
				fw.eep_major, fw.eep_minor,
				fw.etrack_id);
		} else {
			ret = snprintf(fw_version, fw_size,
				"%d.%d.%d",
				fw.eep_major, fw.eep_minor,
				fw.eep_build);
		}
	}
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

static int
eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
	dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_vmdq_pools = 0;

	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGC_DEFAULT_RX_PTHRESH,
			.hthresh = IGC_DEFAULT_RX_HTHRESH,
			.wthresh = IGC_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGC_DEFAULT_TX_PTHRESH,
			.hthresh = IGC_DEFAULT_TX_HTHRESH,
			.wthresh = IGC_DEFAULT_TX_WTHRESH,
		},
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
			RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
			RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;

	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	return 0;
}

static int
eth_igc_led_on(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_igc_led_off(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}

static const uint32_t *
eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rx_desc_pkt_info_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

static int
eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
	uint32_t rctl;

	/* if extend vlan has been enabled */
	if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
		frame_size += VLAN_TAG_SIZE;

	/*
	 * If device is started, refuse mtu that requires the support of
	 * scattered packets when this feature has not been enabled before.
	 */
	if (dev->data->dev_started && !dev->data->scattered_rx &&
		frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
		PMD_INIT_LOG(ERR, "Stop port first.");
		return -EINVAL;
	}

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	/* switch to jumbo mode if needed */
	if (mtu > RTE_ETHER_MTU)
		rctl |= IGC_RCTL_LPE;
	else
		rctl &= ~IGC_RCTL_LPE;
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);

	IGC_WRITE_REG(hw, IGC_RLPML, frame_size);

	return 0;
}

static int
eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_rar_set(hw, mac_addr->addr_bytes, index);
	RTE_SET_USED(pool);
	return 0;
}

static void
eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[RTE_ETHER_ADDR_LEN];
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	memset(addr, 0, sizeof(addr));
	igc_rar_set(hw, addr, index);
}

static int
eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_rar_set(hw, addr->addr_bytes, 0);
	return 0;
}

static int
eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}

/*
 * Read hardware registers
 */
static void
igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)
{
	int pause_frames;

	uint64_t old_gprc = stats->gprc;
	uint64_t old_gptc = stats->gptc;
	uint64_t old_tpr = stats->tpr;
	uint64_t old_tpt = stats->tpt;
	uint64_t old_rpthc = stats->rpthc;
	uint64_t old_hgptc = stats->hgptc;

	stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);
	stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
	stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
	stats->mpc += IGC_READ_REG(hw, IGC_MPC);
	stats->scc += IGC_READ_REG(hw, IGC_SCC);
	stats->ecol += IGC_READ_REG(hw, IGC_ECOL);

	stats->mcc += IGC_READ_REG(hw, IGC_MCC);
	stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
	stats->colc += IGC_READ_REG(hw, IGC_COLC);

	stats->dc += IGC_READ_REG(hw, IGC_DC);
	stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
	stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
	stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
	stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
	stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);
	stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);
	stats->prc64 += IGC_READ_REG(hw, IGC_PRC64);
	stats->prc127 += IGC_READ_REG(hw, IGC_PRC127);
	stats->prc255 += IGC_READ_REG(hw, IGC_PRC255);
	stats->prc511 += IGC_READ_REG(hw, IGC_PRC511);
	stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);
	stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);
	stats->gprc += IGC_READ_REG(hw, IGC_GPRC);
	stats->bprc += IGC_READ_REG(hw, IGC_BPRC);
	stats->mprc += IGC_READ_REG(hw, IGC_MPRC);
	stats->gptc += IGC_READ_REG(hw, IGC_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	/* Workaround CRC bytes included in size, take away 4 bytes/packet */
	stats->gorc += IGC_READ_REG(hw, IGC_GORCL);
	stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);
	stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
	stats->gotc += IGC_READ_REG(hw, IGC_GOTCL);
	stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);
	stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;

	stats->rnbc += IGC_READ_REG(hw, IGC_RNBC);
	stats->ruc += IGC_READ_REG(hw, IGC_RUC);
	stats->rfc += IGC_READ_REG(hw, IGC_RFC);
	stats->roc += IGC_READ_REG(hw, IGC_ROC);
	stats->rjc += IGC_READ_REG(hw, IGC_RJC);

	stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);
	stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);
	stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);
	stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);
	stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);
	stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);
	stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);

	stats->tpr += IGC_READ_REG(hw, IGC_TPR);
	stats->tpt += IGC_READ_REG(hw, IGC_TPT);

	stats->tor += IGC_READ_REG(hw, IGC_TORL);
	stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);
	stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
	stats->tot += IGC_READ_REG(hw, IGC_TOTL);
	stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);
	stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;

	stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);
	stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);
	stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);
	stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);
	stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);
	stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);
	stats->mptc += IGC_READ_REG(hw, IGC_MPTC);
	stats->bptc += IGC_READ_REG(hw, IGC_BPTC);
	stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);

	stats->iac += IGC_READ_REG(hw, IGC_IAC);
	stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);
	stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);
	stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);

	/* Host to Card Statistics */
	stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);
	stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);
	stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
	stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);
	stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);
	stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
	stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);
}

/*
 * Write 0 to all queue status registers
 */
static void
igc_reset_queue_stats_register(struct igc_hw *hw)
{
	int i;

	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGORC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);
		IGC_WRITE_REG(hw, IGC_RQDPC(i), 0);
		IGC_WRITE_REG(hw, IGC_TQDPC(i), 0);
	}
}

/*
 * Read all hardware queue status registers
 */
static void
igc_read_queue_stats_register(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_queue_stats *queue_stats =
			IGC_DEV_PRIVATE_QUEUE_STATS(dev);
	int i;

	/*
	 * This register is not cleared on read. Furthermore, the register wraps
	 * around back to 0x00000000 on the next increment when reaching a value
	 * of 0xFFFFFFFF and then continues normal count operation.
	 */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		union {
			u64 ddword;
			u32 dword[2];
		} value;
		u32 tmp;

		/*
		 * Read the register first; if the value is smaller than the
		 * previous read, the register has wrapped around, so add 1
		 * to the high 4 bytes and replace the low 4 bytes with the
		 * new value.
		 */
		tmp = IGC_READ_REG(hw, IGC_PQGPRC(i));
		value.ddword = queue_stats->pqgprc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgprc[i] = value.ddword;
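
		/*
		 * Example (illustrative): previous pqgprc = 0x00000000fffffff0
		 * and the register now reads 0x10; since 0xfffffff0 > 0x10 the
		 * counter wrapped, so the extended value becomes
		 * 0x0000000100000010.
		 */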

		tmp = IGC_READ_REG(hw, IGC_PQGPTC(i));
		value.ddword = queue_stats->pqgptc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgptc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_PQGORC(i));
		value.ddword = queue_stats->pqgorc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgorc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_PQGOTC(i));
		value.ddword = queue_stats->pqgotc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqgotc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_PQMPRC(i));
		value.ddword = queue_stats->pqmprc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->pqmprc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_RQDPC(i));
		value.ddword = queue_stats->rqdpc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->rqdpc[i] = value.ddword;

		tmp = IGC_READ_REG(hw, IGC_TQDPC(i));
		value.ddword = queue_stats->tqdpc[i];
		if (value.dword[U32_0_IN_U64] > tmp)
			value.dword[U32_1_IN_U64]++;
		value.dword[U32_0_IN_U64] = tmp;
		queue_stats->tqdpc[i] = value.ddword;
	}
}

static int
eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
	struct igc_hw_queue_stats *queue_stats =
			IGC_DEV_PRIVATE_QUEUE_STATS(dev);
	int i;

	/*
	 * Cancel status handler since it will read the queue status registers
	 */
	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* Read status register */
	igc_read_queue_stats_register(dev);
	igc_read_stats_registers(hw, stats);

	if (rte_stats == NULL) {
		/* Restart queue status handler */
		rte_eal_alarm_set(IGC_ALARM_INTERVAL,
				igc_update_queue_stats_handler, dev);
		return -EINVAL;
	}

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs + stats->rlec +
			stats->rxerrc + stats->algnerrc;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes = stats->gorc;
	rte_stats->obytes = stats->gotc;

	/* Get per-queue statuses */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		/* GET TX queue statuses */
		int map_id = igc->txq_stats_map[i];
		if (map_id >= 0) {
			rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
			rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
		}
		/* Get RX queue statuses */
		map_id = igc->rxq_stats_map[i];
		if (map_id >= 0) {
			rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
			rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
			rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
		}
	}

	/* Restart queue status handler */
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
	return 0;
}

static int
eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats =
			IGC_DEV_PRIVATE_STATS(dev);
	unsigned int i;

	igc_read_stats_registers(hw, hw_stats);

	if (n < IGC_NB_XSTATS)
		return IGC_NB_XSTATS;

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	if (!xstats)
		return 0;

	/* Extended stats */
	for (i = 0; i < IGC_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_igc_stats_strings[i].offset);
	}

	return IGC_NB_XSTATS;
}

static int
eth_igc_xstats_reset(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
	struct igc_hw_queue_stats *queue_stats =
			IGC_DEV_PRIVATE_QUEUE_STATS(dev);

	/* Cancel queue status handler to avoid conflicts */
	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* HW registers are cleared on read */
	igc_reset_queue_stats_register(hw);
	igc_read_stats_registers(hw, hw_stats);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));
	memset(queue_stats, 0, sizeof(*queue_stats));

	/* Restart the queue status handler */
	rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
			dev);

	return 0;
}

static int
eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int size)
{
	unsigned int i;

	if (xstats_names == NULL)
		return IGC_NB_XSTATS;

	if (size < IGC_NB_XSTATS) {
		PMD_DRV_LOG(ERR, "not enough buffers!");
		return IGC_NB_XSTATS;
	}

	for (i = 0; i < IGC_NB_XSTATS; i++)
		strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
			sizeof(xstats_names[i].name));

	return IGC_NB_XSTATS;
}

static int
eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
		unsigned int limit)
{
	unsigned int i;

	if (!ids)
		return eth_igc_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= IGC_NB_XSTATS) {
			PMD_DRV_LOG(ERR, "id value isn't valid");
			return -EINVAL;
		}
		strlcpy(xstats_names[i].name,
			rte_igc_stats_strings[ids[i]].name,
			sizeof(xstats_names[i].name));
	}
	return limit;
}

static int
eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
	unsigned int i;

	igc_read_stats_registers(hw, hw_stats);

	if (!ids) {
		if (n < IGC_NB_XSTATS)
			return IGC_NB_XSTATS;

		/* If this is a reset xstats is NULL, and we have cleared the
		 * registers by reading them.
		 */
		if (!values)
			return 0;

		/* Extended stats */
		for (i = 0; i < IGC_NB_XSTATS; i++)
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igc_stats_strings[i].offset);

		return IGC_NB_XSTATS;
	}

	for (i = 0; i < n; i++) {
		if (ids[i] >= IGC_NB_XSTATS) {
			PMD_DRV_LOG(ERR, "id value isn't valid");
			return -EINVAL;
		}
		values[i] = *(uint64_t *)(((char *)hw_stats) +
				rte_igc_stats_strings[ids[i]].offset);
	}
	return n;
}

static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
{
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);

	/* check queue id is valid */
	if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
		PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
			queue_id, IGC_QUEUE_PAIRS_NUM - 1);
		return -EINVAL;
	}

	/* store the mapping status id */
	if (is_rx)
		igc->rxq_stats_map[queue_id] = stat_idx;
	else
		igc->txq_stats_map[queue_id] = stat_idx;

	return 0;
}
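
/*
 * Example (illustrative): after mapping Rx queue 2 to stats index 5,
 * eth_igc_stats_get() accumulates that queue's counters into
 * rte_stats->q_ipackets[5] / q_ibytes[5] / q_errors[5].
 */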

static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMC, mask);
	IGC_WRITE_FLUSH(hw);

	return 0;
}
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMS, mask);
	IGC_WRITE_FLUSH(hw);
	rte_intr_enable(intr_handle);

	return 0;
}
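/*
 * Usage sketch (illustrative, hypothetical guard): the usual interrupt-mode
 * loop re-arms the queue vector, waits for the event, then masks it again
 * before polling; the two handlers above implement the EIMS/EIMC writes
 * this relies on.
 */
#ifdef IGC_USAGE_EXAMPLES
static int
example_arm_and_mask_rxq0(uint16_t port_id)
{
	int ret = rte_eth_dev_rx_intr_enable(port_id, 0);

	if (ret != 0)
		return ret;
	/* ... wait for the interrupt event, then mask it again ... */
	return rte_eth_dev_rx_intr_disable(port_id, 0);
}
#endif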
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl;
	int rx_pause, tx_pause;

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to the actual setting
	 * of the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = IGC_READ_REG(hw, IGC_CTRL);
	if (ctrl & IGC_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & IGC_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_ETH_FC_NONE;

	return 0;
}
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;
	int err;

	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;

	rx_buf_size = igc_get_rx_buffer_size(hw);
	PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* Reserve at least one Ethernet frame for the watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if (fc_conf->high_water > max_high_water ||
		fc_conf->high_water < fc_conf->low_water) {
		PMD_DRV_LOG(ERR,
			"Incorrect high(%u)/low(%u) water value, max is %u",
			fc_conf->high_water, fc_conf->low_water,
			max_high_water);
		return -EINVAL;
	}

	switch (fc_conf->mode) {
	case RTE_ETH_FC_NONE:
		hw->fc.requested_mode = igc_fc_none;
		break;
	case RTE_ETH_FC_RX_PAUSE:
		hw->fc.requested_mode = igc_fc_rx_pause;
		break;
	case RTE_ETH_FC_TX_PAUSE:
		hw->fc.requested_mode = igc_fc_tx_pause;
		break;
	case RTE_ETH_FC_FULL:
		hw->fc.requested_mode = igc_fc_full;
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
		return -EINVAL;
	}

	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = igc_setup_link_generic(hw);
	if (err == IGC_SUCCESS) {
		/*
		 * Check if we want to forward MAC frames - the driver has no
		 * native capability to do that, so write the registers
		 * directly.
		 */
		rctl = IGC_READ_REG(hw, IGC_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= IGC_RCTL_PMCF;
		else
			rctl &= ~IGC_RCTL_PMCF;

		IGC_WRITE_REG(hw, IGC_RCTL, rctl);
		IGC_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
	return -EIO;
}
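/*
 * Usage sketch (illustrative, hypothetical guard): the read-modify-write
 * pattern for flow control; fetch the current configuration through the
 * get handler, switch the mode, and apply it through the set handler
 * above. The pause time reuses the driver's IGC_FC_PAUSE_TIME default.
 */
#ifdef IGC_USAGE_EXAMPLES
static int
example_enable_full_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;
	int ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);

	if (ret != 0)
		return ret;
	fc_conf.mode = RTE_ETH_FC_FULL;
	fc_conf.pause_time = IGC_FC_PAUSE_TIME;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif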
static int
eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured (%d) doesn't match the number supported by hardware (%d)",
			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* set the redirection table */
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta, reg;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* if there is no need to update the register */
		if (!mask ||
		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* check whether the register value must be read back first */
		if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
			reg.dword = 0;
		else
			reg.dword = IGC_READ_REG_LE_VALUE(hw,
				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));

		/* update the register */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta.bytes[j] =
					(uint8_t)reta_conf[idx].reta[shift + j];
			else
				reta.bytes[j] = reg.bytes[j];
		}
		IGC_WRITE_REG_LE_VALUE(hw,
			IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
	}

	return 0;
}
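/*
 * Usage sketch (illustrative, hypothetical guard): spread all 128 RETA
 * entries round-robin across nb_queues Rx queues. Every group's mask is
 * fully set, so the update handler above can skip the read-back path.
 */
#ifdef IGC_USAGE_EXAMPLES
static int
example_spread_reta(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[
		RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE];
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++) {
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
			1ull << (i % RTE_ETH_RETA_GROUP_SIZE);
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[
			i % RTE_ETH_RETA_GROUP_SIZE] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
			RTE_ETH_RSS_RETA_SIZE_128);
}
#endif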
static int
eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured (%d) doesn't match the number supported by hardware (%d)",
			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* read the redirection table */
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* if there is no need to read the register */
		if (!mask ||
		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* read the register and extract the queue indices */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		reta.dword = IGC_READ_REG_LE_VALUE(hw,
				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta_conf[idx].reta[shift + j] = reta.bytes[j];
		}
	}

	return 0;
}
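/*
 * Usage sketch (illustrative, hypothetical guard): read back a single RETA
 * entry through the query handler above by setting only the corresponding
 * bit in the group mask.
 */
#ifdef IGC_USAGE_EXAMPLES
static int
example_query_reta_entry(uint16_t port_id, uint16_t entry, uint16_t *queue)
{
	struct rte_eth_rss_reta_entry64 reta_conf[
		RTE_ETH_RSS_RETA_SIZE_128 / RTE_ETH_RETA_GROUP_SIZE];
	int ret;

	if (entry >= RTE_ETH_RSS_RETA_SIZE_128)
		return -EINVAL;

	memset(reta_conf, 0, sizeof(reta_conf));
	reta_conf[entry / RTE_ETH_RETA_GROUP_SIZE].mask =
		1ull << (entry % RTE_ETH_RETA_GROUP_SIZE);
	ret = rte_eth_dev_rss_reta_query(port_id, reta_conf,
			RTE_ETH_RSS_RETA_SIZE_128);
	if (ret != 0)
		return ret;
	*queue = reta_conf[entry / RTE_ETH_RETA_GROUP_SIZE]
			.reta[entry % RTE_ETH_RETA_GROUP_SIZE];
	return 0;
}
#endif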
static int
eth_igc_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_hw_rss_hash_set(hw, rss_conf);
	return 0;
}
static int
eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t rss_hf;

	if (hash_key != NULL) {
		int i;

		/* if there is not enough space to store the hash key */
		if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
			PMD_DRV_LOG(ERR,
				"RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
				rss_conf->rss_key_len, IGC_HKEY_SIZE);
			return -EINVAL;
		}

		/* read the RSS key from the register */
		for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
			hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
	}

	/* get the RSS functions configured in the MRQC register */
	mrqc = IGC_READ_REG(hw, IGC_MRQC);
	if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
		return 0;

	rss_hf = 0;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
		rss_hf |= RTE_ETH_RSS_IPV4;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
		rss_hf |= RTE_ETH_RSS_IPV6;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
		rss_hf |= RTE_ETH_RSS_IPV6_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;

	rss_conf->rss_hf |= rss_hf;
	return 0;
}
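/*
 * Usage sketch (illustrative, hypothetical guard): read the current RSS
 * configuration through the handler above, then restrict hashing to
 * IPv4/TCP. The key buffer is sized to IGC_HKEY_SIZE as required by the
 * length check above.
 */
#ifdef IGC_USAGE_EXAMPLES
static int
example_restrict_rss_to_ipv4_tcp(uint16_t port_id)
{
	uint8_t key[IGC_HKEY_SIZE];
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = key,
		.rss_key_len = sizeof(key),
	};
	int ret = rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf);

	if (ret != 0)
		return ret;
	rss_conf.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP;
	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}
#endif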
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t vfta;
	uint32_t vid_idx;
	uint32_t vid_bit;

	vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
	vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
	vfta = shadow_vfta->vfta[vid_idx];
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);

	/* update the local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}
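/*
 * Usage sketch (illustrative, hypothetical guard): admit VLAN 100 through
 * the hardware VLAN filter; this toggles one VFTA bit in the handler above.
 */
#ifdef IGC_USAGE_EXAMPLES
static int
example_allow_vlan_100(uint16_t port_id)
{
	return rte_eth_dev_vlan_filter(port_id, 100, 1 /* on */);
}
#endif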
static void
igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_clear_bits(hw, IGC_RCTL,
			IGC_RCTL_CFIEN | IGC_RCTL_VFE);
}
static void
igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t reg_val;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg_val = IGC_READ_REG(hw, IGC_RCTL);
	reg_val &= ~IGC_RCTL_CFIEN;
	reg_val |= IGC_RCTL_VFE;
	IGC_WRITE_REG(hw, IGC_RCTL, reg_val);

	/* restore the VFTA table */
	for (i = 0; i < IGC_VFTA_SIZE; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
}
static void
igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static void
igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}
static int
igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* if extended VLAN hasn't been enabled */
	if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
		return 0;

	/* Update the maximum packet length */
	if (frame_size < RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
			frame_size, VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
		return -EINVAL;
	}
	IGC_WRITE_REG(hw, IGC_RLPML, frame_size - VLAN_TAG_SIZE);

	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}
static int
igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* if extended VLAN has already been enabled */
	if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
		return 0;

	/* Update the maximum packet length */
	if (frame_size > MAX_RX_JUMBO_FRAME_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
			frame_size, MAX_RX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	IGC_WRITE_REG(hw, IGC_RLPML, frame_size);

	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}
static int
eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			igc_vlan_hw_strip_enable(dev);
		else
			igc_vlan_hw_strip_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			igc_vlan_hw_filter_enable(dev);
		else
			igc_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			return igc_vlan_hw_extend_enable(dev);
		else
			return igc_vlan_hw_extend_disable(dev);
	}

	return 0;
}
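/*
 * Usage sketch (illustrative, hypothetical guard): enable VLAN stripping
 * and filtering at runtime. Each set bit in the argument requests that
 * offload enabled; ethdev updates dev_conf.rxmode.offloads and calls the
 * handler above with a mask of only the groups that changed.
 */
#ifdef IGC_USAGE_EXAMPLES
static int
example_enable_vlan_strip_and_filter(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_offload(port_id,
			RTE_ETH_VLAN_STRIP_OFFLOAD |
			RTE_ETH_VLAN_FILTER_OFFLOAD);
}
#endif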
static int
eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type,
		uint16_t tpid)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t reg_val;

	/* only the outer TPID of a double VLAN can be configured */
	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
		reg_val = IGC_READ_REG(hw, IGC_VET);
		reg_val = (reg_val & (~IGC_VET_EXT)) |
			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
		IGC_WRITE_REG(hw, IGC_VET, reg_val);

		return 0;
	}

	/* all other TPID values are read-only */
	PMD_DRV_LOG(ERR, "Not supported");
	return -ENOTSUP;
}
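/*
 * Usage sketch (illustrative, hypothetical guard): program 802.1ad (QinQ,
 * TPID 0x88a8) as the outer VLAN ether type; this lands in the handler
 * above and rewrites the upper half of the VET register.
 */
#ifdef IGC_USAGE_EXAMPLES
static int
example_set_outer_tpid_qinq(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_ether_type(port_id,
			RTE_ETH_VLAN_TYPE_OUTER, RTE_ETHER_TYPE_QINQ);
}
#endif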
static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct igc_adapter), eth_igc_dev_init);
}
static int
eth_igc_pci_remove(struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
}
static struct rte_pci_driver rte_igc_pmd = {
	.id_table = pci_id_igc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igc_pci_probe,
	.remove = eth_igc_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");
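/*
 * Note (illustrative): per the kmod dependency declared above, the device
 * must be bound to igb_uio, uio_pci_generic, or vfio-pci before EAL can
 * map it, e.g.:
 *   dpdk-devbind.py --bind=vfio-pci 0000:01:00.0
 * The PCI address is an example placeholder.
 */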