1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019-2020 Intel Corporation
8 #include <rte_string_fns.h>
10 #include <rte_bus_pci.h>
11 #include <ethdev_driver.h>
12 #include <ethdev_pci.h>
13 #include <rte_malloc.h>
14 #include <rte_alarm.h>
18 #include "igc_filter.h"
21 #define IGC_INTEL_VENDOR_ID 0x8086
23 #define IGC_FC_PAUSE_TIME 0x0680
24 #define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
25 #define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
27 #define IGC_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
28 #define IGC_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
29 #define IGC_MSIX_OTHER_INTR_VEC 0 /* MSI-X other interrupt vector */
30 #define IGC_FLAG_NEED_LINK_UPDATE (1u << 0) /* need to update link */
32 #define IGC_DEFAULT_RX_FREE_THRESH 32
34 #define IGC_DEFAULT_RX_PTHRESH 8
35 #define IGC_DEFAULT_RX_HTHRESH 8
36 #define IGC_DEFAULT_RX_WTHRESH 4
38 #define IGC_DEFAULT_TX_PTHRESH 8
39 #define IGC_DEFAULT_TX_HTHRESH 1
40 #define IGC_DEFAULT_TX_WTHRESH 16
45 /* External VLAN Enable bit mask */
46 #define IGC_CTRL_EXT_EXT_VLAN (1u << 26)
49 #define IGC_CTRL_SPEED_MASK (7u << 8)
50 #define IGC_CTRL_SPEED_2500 (6u << 8)
52 /* External VLAN Ether Type bit mask and shift */
53 #define IGC_VET_EXT 0xFFFF0000
54 #define IGC_VET_EXT_SHIFT 16
56 /* Force EEE Auto-negotiation */
57 #define IGC_EEER_EEE_FRC_AN (1u << 28)
59 /* Per Queue Good Packets Received Count */
60 #define IGC_PQGPRC(idx) (0x10010 + 0x100 * (idx))
61 /* Per Queue Good Octets Received Count */
62 #define IGC_PQGORC(idx) (0x10018 + 0x100 * (idx))
63 /* Per Queue Good Octets Transmitted Count */
64 #define IGC_PQGOTC(idx) (0x10034 + 0x100 * (idx))
65 /* Per Queue Multicast Packets Received Count */
66 #define IGC_PQMPRC(idx) (0x10038 + 0x100 * (idx))
67 /* Transmit Queue Drop Packet Count */
68 #define IGC_TQDPC(idx) (0xe030 + 0x40 * (idx))
70 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
71 #define U32_0_IN_U64 0 /* lower bytes of u64 */
72 #define U32_1_IN_U64 1 /* higher bytes of u64 */
73 #else
74 #define U32_0_IN_U64 1
75 #define U32_1_IN_U64 0
76 #endif
78 #define IGC_ALARM_INTERVAL 8000000u
79 /* us; some per-queue registers can wrap around to 0 in about 13.6s, so poll more often than that. */
81 static const struct rte_eth_desc_lim rx_desc_lim = {
82 .nb_max = IGC_MAX_RXD,
83 .nb_min = IGC_MIN_RXD,
84 .nb_align = IGC_RXD_ALIGN,
87 static const struct rte_eth_desc_lim tx_desc_lim = {
88 .nb_max = IGC_MAX_TXD,
89 .nb_min = IGC_MIN_TXD,
90 .nb_align = IGC_TXD_ALIGN,
91 .nb_seg_max = IGC_TX_MAX_SEG,
92 .nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,
95 static const struct rte_pci_id pci_id_igc_map[] = {
96 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
97 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V) },
98 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I) },
99 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K) },
100 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I226_K) },
101 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I226_LMVP) },
102 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I226_LM) },
103 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I226_V) },
104 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I226_IT) },
105 { RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I226_BLANK_NVM) },
106 { .vendor_id = 0, /* sentinel */ },
109 /* store statistics names and their offsets in the stats structure */
110 struct rte_igc_xstats_name_off {
111 char name[RTE_ETH_XSTATS_NAME_SIZE];
115 static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
116 {"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)},
117 {"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)},
118 {"rx_errors", offsetof(struct igc_hw_stats, rxerrc)},
119 {"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)},
120 {"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)},
121 {"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)},
122 {"tx_excessive_collision_packets", offsetof(struct igc_hw_stats,
124 {"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)},
125 {"tx_total_collisions", offsetof(struct igc_hw_stats, colc)},
126 {"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)},
127 {"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)},
128 {"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)},
129 {"rx_length_errors", offsetof(struct igc_hw_stats, rlec)},
130 {"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)},
131 {"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)},
132 {"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)},
133 {"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)},
134 {"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats,
136 {"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)},
137 {"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)},
138 {"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)},
139 {"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)},
140 {"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
142 {"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
144 {"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)},
145 {"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)},
146 {"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)},
147 {"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)},
148 {"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)},
149 {"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)},
150 {"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)},
151 {"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)},
152 {"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)},
153 {"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)},
154 {"rx_total_packets", offsetof(struct igc_hw_stats, tpr)},
155 {"tx_total_packets", offsetof(struct igc_hw_stats, tpt)},
156 {"rx_total_bytes", offsetof(struct igc_hw_stats, tor)},
157 {"tx_total_bytes", offsetof(struct igc_hw_stats, tot)},
158 {"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)},
159 {"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)},
160 {"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)},
161 {"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)},
162 {"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
164 {"tx_size_1023_to_max_packets", offsetof(struct igc_hw_stats,
166 {"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)},
167 {"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)},
168 {"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)},
169 {"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)},
170 {"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)},
171 {"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)},
172 {"rx_descriptor_lower_threshold",
173 offsetof(struct igc_hw_stats, icrxdmtc)},
176 #define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \
177 sizeof(rte_igc_stats_strings[0]))
179 static int eth_igc_configure(struct rte_eth_dev *dev);
180 static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
181 static int eth_igc_stop(struct rte_eth_dev *dev);
182 static int eth_igc_start(struct rte_eth_dev *dev);
183 static int eth_igc_set_link_up(struct rte_eth_dev *dev);
184 static int eth_igc_set_link_down(struct rte_eth_dev *dev);
185 static int eth_igc_close(struct rte_eth_dev *dev);
186 static int eth_igc_reset(struct rte_eth_dev *dev);
187 static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
188 static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
189 static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
190 char *fw_version, size_t fw_size);
191 static int eth_igc_infos_get(struct rte_eth_dev *dev,
192 struct rte_eth_dev_info *dev_info);
193 static int eth_igc_led_on(struct rte_eth_dev *dev);
194 static int eth_igc_led_off(struct rte_eth_dev *dev);
195 static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
196 static int eth_igc_rar_set(struct rte_eth_dev *dev,
197 struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
198 static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
199 static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
200 struct rte_ether_addr *addr);
201 static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
202 struct rte_ether_addr *mc_addr_set,
203 uint32_t nb_mc_addr);
204 static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);
205 static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);
206 static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
207 static int eth_igc_stats_get(struct rte_eth_dev *dev,
208 struct rte_eth_stats *rte_stats);
209 static int eth_igc_xstats_get(struct rte_eth_dev *dev,
210 struct rte_eth_xstat *xstats, unsigned int n);
211 static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,
213 uint64_t *values, unsigned int n);
214 static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,
215 struct rte_eth_xstat_name *xstats_names,
217 static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
218 const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
220 static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
222 eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
223 uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);
225 eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
227 eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
229 eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
231 eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
232 static int eth_igc_rss_reta_update(struct rte_eth_dev *dev,
233 struct rte_eth_rss_reta_entry64 *reta_conf,
235 static int eth_igc_rss_reta_query(struct rte_eth_dev *dev,
236 struct rte_eth_rss_reta_entry64 *reta_conf,
238 static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,
239 struct rte_eth_rss_conf *rss_conf);
240 static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
241 struct rte_eth_rss_conf *rss_conf);
243 eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
244 static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
245 static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
246 enum rte_vlan_type vlan_type, uint16_t tpid);
248 static const struct eth_dev_ops eth_igc_ops = {
249 .dev_configure = eth_igc_configure,
250 .link_update = eth_igc_link_update,
251 .dev_stop = eth_igc_stop,
252 .dev_start = eth_igc_start,
253 .dev_close = eth_igc_close,
254 .dev_reset = eth_igc_reset,
255 .dev_set_link_up = eth_igc_set_link_up,
256 .dev_set_link_down = eth_igc_set_link_down,
257 .promiscuous_enable = eth_igc_promiscuous_enable,
258 .promiscuous_disable = eth_igc_promiscuous_disable,
259 .allmulticast_enable = eth_igc_allmulticast_enable,
260 .allmulticast_disable = eth_igc_allmulticast_disable,
261 .fw_version_get = eth_igc_fw_version_get,
262 .dev_infos_get = eth_igc_infos_get,
263 .dev_led_on = eth_igc_led_on,
264 .dev_led_off = eth_igc_led_off,
265 .dev_supported_ptypes_get = eth_igc_supported_ptypes_get,
266 .mtu_set = eth_igc_mtu_set,
267 .mac_addr_add = eth_igc_rar_set,
268 .mac_addr_remove = eth_igc_rar_clear,
269 .mac_addr_set = eth_igc_default_mac_addr_set,
270 .set_mc_addr_list = eth_igc_set_mc_addr_list,
272 .rx_queue_setup = eth_igc_rx_queue_setup,
273 .rx_queue_release = eth_igc_rx_queue_release,
274 .tx_queue_setup = eth_igc_tx_queue_setup,
275 .tx_queue_release = eth_igc_tx_queue_release,
276 .tx_done_cleanup = eth_igc_tx_done_cleanup,
277 .rxq_info_get = eth_igc_rxq_info_get,
278 .txq_info_get = eth_igc_txq_info_get,
279 .stats_get = eth_igc_stats_get,
280 .xstats_get = eth_igc_xstats_get,
281 .xstats_get_by_id = eth_igc_xstats_get_by_id,
282 .xstats_get_names_by_id = eth_igc_xstats_get_names_by_id,
283 .xstats_get_names = eth_igc_xstats_get_names,
284 .stats_reset = eth_igc_xstats_reset,
285 .xstats_reset = eth_igc_xstats_reset,
286 .queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,
287 .rx_queue_intr_enable = eth_igc_rx_queue_intr_enable,
288 .rx_queue_intr_disable = eth_igc_rx_queue_intr_disable,
289 .flow_ctrl_get = eth_igc_flow_ctrl_get,
290 .flow_ctrl_set = eth_igc_flow_ctrl_set,
291 .reta_update = eth_igc_rss_reta_update,
292 .reta_query = eth_igc_rss_reta_query,
293 .rss_hash_update = eth_igc_rss_hash_update,
294 .rss_hash_conf_get = eth_igc_rss_hash_conf_get,
295 .vlan_filter_set = eth_igc_vlan_filter_set,
296 .vlan_offload_set = eth_igc_vlan_offload_set,
297 .vlan_tpid_set = eth_igc_vlan_tpid_set,
298 .vlan_strip_queue_set = eth_igc_vlan_strip_queue_set,
299 .flow_ops_get = eth_igc_flow_ops_get,
303 * multiple queue mode checking
306 igc_check_mq_mode(struct rte_eth_dev *dev)
308 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
309 enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
311 if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
312 PMD_INIT_LOG(ERR, "SRIOV is not supported.");
316 if (rx_mq_mode != RTE_ETH_MQ_RX_NONE &&
317 rx_mq_mode != RTE_ETH_MQ_RX_RSS) {
318 /* RSS together with VMDq not supported*/
319 PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
324 /* To not break software that sets an invalid mode, only display a
325 * warning if an invalid mode is used.
327 if (tx_mq_mode != RTE_ETH_MQ_TX_NONE)
328 PMD_INIT_LOG(WARNING,
329 "TX mode %d is not supported. Due to meaningless in this driver, just ignore",
336 eth_igc_configure(struct rte_eth_dev *dev)
338 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
341 PMD_INIT_FUNC_TRACE();
343 if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
344 dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
346 ret = igc_check_mq_mode(dev);
350 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
355 eth_igc_set_link_up(struct rte_eth_dev *dev)
357 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
359 if (hw->phy.media_type == igc_media_type_copper)
360 igc_power_up_phy(hw);
362 igc_power_up_fiber_serdes_link(hw);
367 eth_igc_set_link_down(struct rte_eth_dev *dev)
369 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
371 if (hw->phy.media_type == igc_media_type_copper)
372 igc_power_down_phy(hw);
374 igc_shutdown_fiber_serdes_link(hw);
379 * disable other interrupt
382 igc_intr_other_disable(struct rte_eth_dev *dev)
384 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
385 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
386 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
388 if (rte_intr_allow_others(intr_handle) &&
389 dev->data->dev_conf.intr_conf.lsc) {
390 IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
393 IGC_WRITE_REG(hw, IGC_IMC, ~0);
398 * enable other interrupt
401 igc_intr_other_enable(struct rte_eth_dev *dev)
403 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
404 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
405 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
406 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
408 if (rte_intr_allow_others(intr_handle) &&
409 dev->data->dev_conf.intr_conf.lsc) {
410 IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
413 IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
418 * It reads ICR to get the interrupt causes, checks them, and sets a bit
419 * flag to request a link status update.
422 eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
425 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
426 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
428 /* read-on-clear nic registers here */
429 icr = IGC_READ_REG(hw, IGC_ICR);
432 if (icr & IGC_ICR_LSC)
433 intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
436 /* returns 0 if the link status changed, -1 if it did not change */
438 eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
440 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
441 struct rte_eth_link link;
442 int link_check, count;
445 hw->mac.get_link_status = 1;
447 /* if wait_to_complete is set, poll for link for up to 9 seconds */
448 for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
449 /* Read the real link status */
450 switch (hw->phy.media_type) {
451 case igc_media_type_copper:
452 /* Do the work to read phy */
453 igc_check_for_link(hw);
454 link_check = !hw->mac.get_link_status;
457 case igc_media_type_fiber:
458 igc_check_for_link(hw);
459 link_check = (IGC_READ_REG(hw, IGC_STATUS) &
463 case igc_media_type_internal_serdes:
464 igc_check_for_link(hw);
465 link_check = hw->mac.serdes_has_link;
471 if (link_check || wait_to_complete == 0)
473 rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
475 memset(&link, 0, sizeof(link));
477 /* Now we check if a transition has happened */
479 uint16_t duplex, speed;
480 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
481 link.link_duplex = (duplex == FULL_DUPLEX) ?
482 RTE_ETH_LINK_FULL_DUPLEX :
483 RTE_ETH_LINK_HALF_DUPLEX;
484 link.link_speed = speed;
485 link.link_status = RTE_ETH_LINK_UP;
486 link.link_autoneg = !(dev->data->dev_conf.link_speeds &
487 RTE_ETH_LINK_SPEED_FIXED);
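/* At 2.5Gbps the inter-packet gap time (IPGT) is expected to be 0x0b;
 * fix it up if the register holds a different value (an assumption based
 * on the mask and value used below).
 */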
489 if (speed == SPEED_2500) {
490 uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
491 if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
492 tipg &= ~IGC_TIPG_IPGT_MASK;
494 IGC_WRITE_REG(hw, IGC_TIPG, tipg);
499 link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
500 link.link_status = RTE_ETH_LINK_DOWN;
501 link.link_autoneg = RTE_ETH_LINK_FIXED;
504 return rte_eth_linkstatus_set(dev, &link);
508 * It executes link_update after knowing an interrupt is present.
511 eth_igc_interrupt_action(struct rte_eth_dev *dev)
513 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
514 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
515 struct rte_eth_link link;
518 if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
519 intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;
521 /* set get_link_status to check register later */
522 ret = eth_igc_link_update(dev, 0);
524 /* check if link has changed */
528 rte_eth_linkstatus_get(dev, &link);
529 if (link.link_status)
531 " Port %d: Link Up - speed %u Mbps - %s",
533 (unsigned int)link.link_speed,
534 link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
535 "full-duplex" : "half-duplex");
537 PMD_DRV_LOG(INFO, " Port %d: Link Down",
540 PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
541 pci_dev->addr.domain,
544 pci_dev->addr.function);
545 rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
550 * Interrupt handler, which shall be registered before the interrupt is enabled.
553 * Pointer to interrupt handle.
555 * The address of parameter (struct rte_eth_dev *) registered before.
558 eth_igc_interrupt_handler(void *param)
560 struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
562 eth_igc_interrupt_get_status(dev);
563 eth_igc_interrupt_action(dev);
566 static void igc_read_queue_stats_register(struct rte_eth_dev *dev);
569 * Update the queue statistics every IGC_ALARM_INTERVAL microseconds.
571 * The address of parameter (struct rte_eth_dev *) registered before.
574 igc_update_queue_stats_handler(void *param)
576 struct rte_eth_dev *dev = param;
577 igc_read_queue_stats_register(dev);
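	/* rte_eal_alarm_set() is one-shot, so re-arm it on every callback */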
578 rte_eal_alarm_set(IGC_ALARM_INTERVAL,
579 igc_update_queue_stats_handler, dev);
583 * rx,tx enable/disable
586 eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
588 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
591 tctl = IGC_READ_REG(hw, IGC_TCTL);
592 rctl = IGC_READ_REG(hw, IGC_RCTL);
600 tctl &= ~IGC_TCTL_EN;
601 rctl &= ~IGC_RCTL_EN;
603 IGC_WRITE_REG(hw, IGC_TCTL, tctl);
604 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
609 * This routine disables all traffic on the adapter by issuing a
610 * global reset on the MAC.
613 eth_igc_stop(struct rte_eth_dev *dev)
615 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
616 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
617 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
618 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
619 struct rte_eth_link link;
621 dev->data->dev_started = 0;
622 adapter->stopped = 1;
624 /* disable receive and transmit */
625 eth_igc_rxtx_control(dev, false);
627 /* disable all MSI-X interrupts */
628 IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
631 /* clear all MSI-X interrupts */
632 IGC_WRITE_REG(hw, IGC_EICR, 0x1f);
634 igc_intr_other_disable(dev);
636 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
638 /* disable intr eventfd mapping */
639 rte_intr_disable(intr_handle);
643 /* disable all wake up */
644 IGC_WRITE_REG(hw, IGC_WUC, 0);
646 /* disable checking EEE operation in MAC loopback mode */
647 igc_read_reg_check_clear_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
649 /* Set bit for Go Link disconnect */
650 igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
651 IGC_82580_PM_GO_LINKD);
653 /* Power down the phy. Needed to make the link go Down */
654 eth_igc_set_link_down(dev);
656 igc_dev_clear_queues(dev);
658 /* clear the recorded link status */
659 memset(&link, 0, sizeof(link));
660 rte_eth_linkstatus_set(dev, &link);
662 if (!rte_intr_allow_others(intr_handle))
663 /* resume to the default handler */
664 rte_intr_callback_register(intr_handle,
665 eth_igc_interrupt_handler,
668 /* Clean datapath event and queue/vec mapping */
669 rte_intr_efd_disable(intr_handle);
670 rte_intr_vec_list_free(intr_handle);
676 * write interrupt vector allocation register
678 * board private structure
680 * queue index, valid 0,1,2,3
684 * msix-vector, valid 0,1,2,3,4
687 igc_write_ivar(struct igc_hw *hw, uint8_t queue_index,
688 bool tx, uint8_t msix_vector)
691 uint8_t reg_index = queue_index >> 1;
695 * IVAR(0)
696 * bit31...24 bit23...16 bit15...8 bit7...0
697 * TX1        RX1        TX0       RX0
699 * IVAR(1)
700 * bit31...24 bit23...16 bit15...8 bit7...0
701 * TX3        RX3        TX2       RX2
710 val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index);
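	/* clear the 8-bit IVAR field for this queue before writing the new vector */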
713 val &= ~((uint32_t)0xFF << offset);
715 /* write vector and valid bit */
716 val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset;
718 IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val);
721 /* Sets up the hardware to generate MSI-X interrupts properly
723 * board private structure
726 igc_configure_msix_intr(struct rte_eth_dev *dev)
728 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
729 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
730 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
733 uint32_t vec = IGC_MISC_VEC_ID;
734 uint32_t base = IGC_MISC_VEC_ID;
735 uint32_t misc_shift = 0;
738 /* don't configure the MSI-X register if no mapping has been done
739 * between interrupt vectors and event fds
741 if (!rte_intr_dp_is_en(intr_handle))
744 if (rte_intr_allow_others(intr_handle)) {
745 base = IGC_RX_VEC_START;
750 /* turn on MSI-X capability first */
751 IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
752 IGC_GPIE_PBA | IGC_GPIE_EIAME |
755 nb_efd = rte_intr_nb_efd_get(intr_handle);
759 intr_mask = RTE_LEN2MASK(nb_efd, uint32_t) << misc_shift;
761 if (dev->data->dev_conf.intr_conf.lsc)
762 intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC);
764 /* enable msix auto-clear */
765 igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);
767 /* set other cause interrupt vector */
768 igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
769 (uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);
771 /* enable auto-mask */
772 igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);
774 for (i = 0; i < dev->data->nb_rx_queues; i++) {
775 igc_write_ivar(hw, i, 0, vec);
776 rte_intr_vec_list_index_set(intr_handle, i, vec);
777 if (vec < base + rte_intr_nb_efd_get(intr_handle) - 1)
785 * It enables or disables the link-status-change interrupt by updating the interrupt mask.
788 * Pointer to struct rte_eth_dev.
793 igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
795 struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
798 intr->mask |= IGC_ICR_LSC;
800 intr->mask &= ~IGC_ICR_LSC;
804 * It enables the RX queue interrupts.
805 * It will be called only once during NIC initialization.
808 igc_rxq_interrupt_setup(struct rte_eth_dev *dev)
811 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
812 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
813 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
814 int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;
817 /* don't configure the MSI-X register if no mapping has been done
818 * between interrupt vectors and event fds
820 if (!rte_intr_dp_is_en(intr_handle))
823 nb_efd = rte_intr_nb_efd_get(intr_handle);
827 mask = RTE_LEN2MASK(nb_efd, uint32_t) << misc_shift;
828 IGC_WRITE_REG(hw, IGC_EIMS, mask);
832 * Get hardware rx-buffer size.
835 igc_get_rx_buffer_size(struct igc_hw *hw)
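	/* the low 6 bits of RXPBS hold the Rx packet buffer size in KB; <<10 converts KB to bytes */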
837 return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
841 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
842 * For ASF and Pass Through versions of f/w this means
843 * that the driver is loaded.
846 igc_hw_control_acquire(struct igc_hw *hw)
850 /* Let firmware know the driver has taken over */
851 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
852 IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
856 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
857 * For ASF and Pass Through versions of f/w this means that the
858 * driver is no longer loaded.
861 igc_hw_control_release(struct igc_hw *hw)
865 /* Let firmware take over control of h/w */
866 ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
867 IGC_WRITE_REG(hw, IGC_CTRL_EXT,
868 ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
872 igc_hardware_init(struct igc_hw *hw)
874 uint32_t rx_buf_size;
877 /* Let the firmware know the OS is in control */
878 igc_hw_control_acquire(hw);
880 /* Issue a global reset */
883 /* disable all wake up */
884 IGC_WRITE_REG(hw, IGC_WUC, 0);
887 * Hardware flow control
888 * - High water mark should allow for at least two standard size (1518)
889 * frames to be received after sending an XOFF.
890 * - Low water mark works best when it is very near the high water mark.
891 * This allows the receiver to restart by sending XON when it has
892 * drained a bit. Here we use an arbitrary value of 1500 which will
893 * restart after one full frame is pulled from the buffer. There
894 * could be several smaller frames in the buffer and if so they will
895 * not trigger the XON until their total number reduces the buffer
896 * by 1500.
898 rx_buf_size = igc_get_rx_buffer_size(hw);
899 hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
900 hw->fc.low_water = hw->fc.high_water - 1500;
901 hw->fc.pause_time = IGC_FC_PAUSE_TIME;
903 hw->fc.requested_mode = igc_fc_full;
905 diag = igc_init_hw(hw);
909 igc_get_phy_info(hw);
910 igc_check_for_link(hw);
916 eth_igc_start(struct rte_eth_dev *dev)
918 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
919 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
920 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
921 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
925 PMD_INIT_FUNC_TRACE();
927 /* disable all MSI-X interrupts */
928 IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
931 /* clear all MSI-X interrupts */
932 IGC_WRITE_REG(hw, IGC_EICR, 0x1f);
934 /* disable uio/vfio intr/eventfd mapping */
935 if (!adapter->stopped)
936 rte_intr_disable(intr_handle);
938 /* Power up the phy. Needed to make the link go Up */
939 eth_igc_set_link_up(dev);
941 /* Put the address into the Receive Address Array */
942 igc_rar_set(hw, hw->mac.addr, 0);
944 /* Initialize the hardware */
945 if (igc_hardware_init(hw)) {
946 PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
949 adapter->stopped = 0;
951 /* check and configure queue intr-vector mapping */
952 if (rte_intr_cap_multiple(intr_handle) &&
953 dev->data->dev_conf.intr_conf.rxq) {
954 uint32_t intr_vector = dev->data->nb_rx_queues;
955 if (rte_intr_efd_enable(intr_handle, intr_vector))
959 if (rte_intr_dp_is_en(intr_handle)) {
960 if (rte_intr_vec_list_alloc(intr_handle, "intr_vec",
961 dev->data->nb_rx_queues)) {
963 "Failed to allocate %d rx_queues intr_vec",
964 dev->data->nb_rx_queues);
969 /* configure msix for rx interrupt */
970 igc_configure_msix_intr(dev);
974 /* This can fail when allocating mbufs for descriptor rings */
975 ret = igc_rx_init(dev);
977 PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
978 igc_dev_clear_queues(dev);
982 igc_clear_hw_cntrs_base_generic(hw);
984 /* VLAN Offload Settings */
985 eth_igc_vlan_offload_set(dev,
986 RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK |
987 RTE_ETH_VLAN_EXTEND_MASK);
989 /* Setup link speed and duplex */
990 speeds = &dev->data->dev_conf.link_speeds;
991 if (*speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
992 hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
997 if (*speeds & RTE_ETH_LINK_SPEED_FIXED) {
999 "Force speed mode currently not supported");
1000 igc_dev_clear_queues(dev);
1004 hw->phy.autoneg_advertised = 0;
1005 hw->mac.autoneg = 1;
1007 if (*speeds & ~(RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
1008 RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
1009 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G)) {
1011 goto error_invalid_config;
1013 if (*speeds & RTE_ETH_LINK_SPEED_10M_HD) {
1014 hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
1017 if (*speeds & RTE_ETH_LINK_SPEED_10M) {
1018 hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
1021 if (*speeds & RTE_ETH_LINK_SPEED_100M_HD) {
1022 hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
1025 if (*speeds & RTE_ETH_LINK_SPEED_100M) {
1026 hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
1029 if (*speeds & RTE_ETH_LINK_SPEED_1G) {
1030 hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
1033 if (*speeds & RTE_ETH_LINK_SPEED_2_5G) {
1034 hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
1037 if (num_speeds == 0)
1038 goto error_invalid_config;
1043 if (rte_intr_allow_others(intr_handle)) {
1044 /* check if lsc interrupt is enabled */
1045 if (dev->data->dev_conf.intr_conf.lsc)
1046 igc_lsc_interrupt_setup(dev, 1);
1048 igc_lsc_interrupt_setup(dev, 0);
1050 rte_intr_callback_unregister(intr_handle,
1051 eth_igc_interrupt_handler,
1053 if (dev->data->dev_conf.intr_conf.lsc)
1055 "LSC won't enable because of no intr multiplex");
1058 /* enable uio/vfio intr/eventfd mapping */
1059 rte_intr_enable(intr_handle);
1061 rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1062 igc_update_queue_stats_handler, dev);
1064 /* check if rxq interrupt is enabled */
1065 if (dev->data->dev_conf.intr_conf.rxq &&
1066 rte_intr_dp_is_en(intr_handle))
1067 igc_rxq_interrupt_setup(dev);
1069 /* resume enabled interrupts, since they were disabled by the hw reset */
1070 igc_intr_other_enable(dev);
1072 eth_igc_rxtx_control(dev, true);
1073 eth_igc_link_update(dev, 0);
1075 /* configure MAC-loopback mode */
1076 if (dev->data->dev_conf.lpbk_mode == 1) {
1079 reg_val = IGC_READ_REG(hw, IGC_CTRL);
1080 reg_val &= ~IGC_CTRL_SPEED_MASK;
1081 reg_val |= IGC_CTRL_SLU | IGC_CTRL_FRCSPD |
1082 IGC_CTRL_FRCDPX | IGC_CTRL_FD | IGC_CTRL_SPEED_2500;
1083 IGC_WRITE_REG(hw, IGC_CTRL, reg_val);
1085 igc_read_reg_check_set_bits(hw, IGC_EEER, IGC_EEER_EEE_FRC_AN);
1090 error_invalid_config:
1091 PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
1092 dev->data->dev_conf.link_speeds, dev->data->port_id);
1093 igc_dev_clear_queues(dev);
1098 igc_reset_swfw_lock(struct igc_hw *hw)
1103 * Do mac ops initialization manually here, since we will need
1104 * some function pointers set by this call.
1106 ret_val = igc_init_mac_params(hw);
1111 * Acquiring the SMBI lock should not fail at this early stage. If it
1112 * does, it is due to an improper exit of the application.
1113 * So force the release of the faulty lock.
1115 if (igc_get_hw_semaphore_generic(hw) < 0)
1116 PMD_DRV_LOG(DEBUG, "SMBI lock released");
1118 igc_put_hw_semaphore_generic(hw);
1120 if (hw->mac.ops.acquire_swfw_sync != NULL) {
1124 * The PHY lock should not fail at this early stage.
1125 * If it does, it is due to an improper exit of the
1126 * application. So force the release of the faulty lock.
1128 mask = IGC_SWFW_PHY0_SM;
1129 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
1130 PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
1133 hw->mac.ops.release_swfw_sync(hw, mask);
1136 * This one is more tricky since it is common to all ports; but
1137 * swfw_sync retries last long enough (1s) to be almost sure
1138 * that if the lock cannot be taken it is due to an improper lock
1139 * of the semaphore.
1141 mask = IGC_SWFW_EEP_SM;
1142 if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
1143 PMD_DRV_LOG(DEBUG, "SWFW common locks released");
1145 hw->mac.ops.release_swfw_sync(hw, mask);
1152 * free all rx/tx queues.
1155 igc_dev_free_queues(struct rte_eth_dev *dev)
1159 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1160 eth_igc_rx_queue_release(dev, i);
1161 dev->data->rx_queues[i] = NULL;
1163 dev->data->nb_rx_queues = 0;
1165 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1166 eth_igc_tx_queue_release(dev, i);
1167 dev->data->tx_queues[i] = NULL;
1169 dev->data->nb_tx_queues = 0;
1173 eth_igc_close(struct rte_eth_dev *dev)
1175 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1176 struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
1177 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1178 struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
1182 PMD_INIT_FUNC_TRACE();
1183 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1186 if (!adapter->stopped)
1187 ret = eth_igc_stop(dev);
1189 igc_flow_flush(dev, NULL);
1190 igc_clear_all_filter(dev);
1192 igc_intr_other_disable(dev);
1194 int ret = rte_intr_callback_unregister(intr_handle,
1195 eth_igc_interrupt_handler, dev);
1196 if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
1199 PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
1200 DELAY(200 * 1000); /* delay 200ms */
1201 } while (retry++ < 5);
1203 igc_phy_hw_reset(hw);
1204 igc_hw_control_release(hw);
1205 igc_dev_free_queues(dev);
1207 /* Reset any pending lock */
1208 igc_reset_swfw_lock(hw);
1214 igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
1216 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1218 hw->vendor_id = pci_dev->id.vendor_id;
1219 hw->device_id = pci_dev->id.device_id;
1220 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
1221 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
1225 eth_igc_dev_init(struct rte_eth_dev *dev)
1227 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1228 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
1229 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1232 PMD_INIT_FUNC_TRACE();
1233 dev->dev_ops = ð_igc_ops;
1234 dev->rx_queue_count = eth_igc_rx_queue_count;
1235 dev->rx_descriptor_status = eth_igc_rx_descriptor_status;
1236 dev->tx_descriptor_status = eth_igc_tx_descriptor_status;
1239 * for secondary processes, we don't initialize any further as primary
1240 * has already done this work. Only check we don't need a different
1241 * RX function.
1243 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1244 dev->rx_pkt_burst = igc_recv_pkts;
1245 if (dev->data->scattered_rx)
1246 dev->rx_pkt_burst = igc_recv_scattered_pkts;
1248 dev->tx_pkt_burst = igc_xmit_pkts;
1249 dev->tx_pkt_prepare = eth_igc_prep_pkts;
1253 rte_eth_copy_pci_info(dev, pci_dev);
1254 dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1257 hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
1259 igc_identify_hardware(dev, pci_dev);
1260 if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
1265 igc_get_bus_info(hw);
1267 /* Reset any pending lock */
1268 if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
1273 /* Finish initialization */
1274 if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
1279 hw->mac.autoneg = 1;
1280 hw->phy.autoneg_wait_to_complete = 0;
1281 hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
1283 /* Copper options */
1284 if (hw->phy.media_type == igc_media_type_copper) {
1285 hw->phy.mdix = 0; /* AUTO_ALL_MODES */
1286 hw->phy.disable_polarity_correction = 0;
1287 hw->phy.ms_type = igc_ms_hw_default;
1291 * Start from a known state; this is important for reading the nvm
1292 * and mac address from it.
1296 /* Make sure we have a good EEPROM before we read from it */
1297 if (igc_validate_nvm_checksum(hw) < 0) {
1299 * Some PCI-E parts fail the first check due to
1300 * the link being in sleep state; call it again,
1301 * if it fails a second time it's a real issue.
1303 if (igc_validate_nvm_checksum(hw) < 0) {
1304 PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
1310 /* Read the permanent MAC address out of the EEPROM */
1311 if (igc_read_mac_addr(hw) != 0) {
1312 PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
1317 /* Allocate memory for storing MAC addresses */
1318 dev->data->mac_addrs = rte_zmalloc("igc",
1319 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
1320 if (dev->data->mac_addrs == NULL) {
1321 PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
1322 RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
1327 /* Copy the permanent MAC address */
1328 rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
1329 &dev->data->mac_addrs[0]);
1331 /* Now initialize the hardware */
1332 if (igc_hardware_init(hw) != 0) {
1333 PMD_INIT_LOG(ERR, "Hardware initialization failed");
1334 rte_free(dev->data->mac_addrs);
1335 dev->data->mac_addrs = NULL;
1340 hw->mac.get_link_status = 1;
1343 /* Indicate SOL/IDER usage */
1344 if (igc_check_reset_block(hw) < 0)
1346 "PHY reset is blocked due to SOL/IDER session.");
1348 PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
1349 dev->data->port_id, pci_dev->id.vendor_id,
1350 pci_dev->id.device_id);
1352 rte_intr_callback_register(pci_dev->intr_handle,
1353 eth_igc_interrupt_handler, (void *)dev);
1355 /* enable uio/vfio intr/eventfd mapping */
1356 rte_intr_enable(pci_dev->intr_handle);
1358 /* enable support intr */
1359 igc_intr_other_enable(dev);
1361 /* initialize the per-queue statistics mapping */
1362 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1363 igc->txq_stats_map[i] = -1;
1364 igc->rxq_stats_map[i] = -1;
1368 igc_clear_all_filter(dev);
1372 igc_hw_control_release(hw);
1377 eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
1379 PMD_INIT_FUNC_TRACE();
1380 eth_igc_close(eth_dev);
1385 eth_igc_reset(struct rte_eth_dev *dev)
1389 PMD_INIT_FUNC_TRACE();
1391 ret = eth_igc_dev_uninit(dev);
1395 return eth_igc_dev_init(dev);
1399 eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
1401 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1404 rctl = IGC_READ_REG(hw, IGC_RCTL);
1405 rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
1406 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1411 eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
1413 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1416 rctl = IGC_READ_REG(hw, IGC_RCTL);
1417 rctl &= (~IGC_RCTL_UPE);
1418 if (dev->data->all_multicast == 1)
1419 rctl |= IGC_RCTL_MPE;
1421 rctl &= (~IGC_RCTL_MPE);
1422 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1427 eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
1429 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1432 rctl = IGC_READ_REG(hw, IGC_RCTL);
1433 rctl |= IGC_RCTL_MPE;
1434 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1439 eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
1441 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1444 if (dev->data->promiscuous == 1)
1445 return 0; /* must remain in all_multicast mode */
1447 rctl = IGC_READ_REG(hw, IGC_RCTL);
1448 rctl &= (~IGC_RCTL_MPE);
1449 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1454 eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
1457 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1458 struct igc_fw_version fw;
1461 igc_get_fw_version(hw, &fw);
1463 /* if option rom is valid, display its version too */
1465 ret = snprintf(fw_version, fw_size,
1466 "%d.%d, 0x%08x, %d.%d.%d",
1467 fw.eep_major, fw.eep_minor, fw.etrack_id,
1468 fw.or_major, fw.or_build, fw.or_patch);
1471 if (fw.etrack_id != 0x0000) {
1472 ret = snprintf(fw_version, fw_size,
1474 fw.eep_major, fw.eep_minor,
1477 ret = snprintf(fw_version, fw_size,
1479 fw.eep_major, fw.eep_minor,
1486 ret += 1; /* add the size of '\0' */
1487 if (fw_size < (size_t)ret)
1494 eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
1496 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1498 dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
1499 dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
1500 dev_info->max_mac_addrs = hw->mac.rar_entry_count;
1501 dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
1502 dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
1503 dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
1504 dev_info->rx_queue_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1506 dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
1507 dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
1508 dev_info->max_vmdq_pools = 0;
1510 dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
1511 dev_info->reta_size = RTE_ETH_RSS_RETA_SIZE_128;
1512 dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;
1514 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1516 .pthresh = IGC_DEFAULT_RX_PTHRESH,
1517 .hthresh = IGC_DEFAULT_RX_HTHRESH,
1518 .wthresh = IGC_DEFAULT_RX_WTHRESH,
1520 .rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
1525 dev_info->default_txconf = (struct rte_eth_txconf) {
1527 .pthresh = IGC_DEFAULT_TX_PTHRESH,
1528 .hthresh = IGC_DEFAULT_TX_HTHRESH,
1529 .wthresh = IGC_DEFAULT_TX_WTHRESH,
1534 dev_info->rx_desc_lim = rx_desc_lim;
1535 dev_info->tx_desc_lim = tx_desc_lim;
1537 dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD | RTE_ETH_LINK_SPEED_10M |
1538 RTE_ETH_LINK_SPEED_100M_HD | RTE_ETH_LINK_SPEED_100M |
1539 RTE_ETH_LINK_SPEED_1G | RTE_ETH_LINK_SPEED_2_5G;
1541 dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
1542 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
1547 eth_igc_led_on(struct rte_eth_dev *dev)
1549 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1551 return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
1555 eth_igc_led_off(struct rte_eth_dev *dev)
1557 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1559 return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
1562 static const uint32_t *
1563 eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
1565 static const uint32_t ptypes[] = {
1566 /* refers to rx_desc_pkt_info_to_pkt_type() */
1569 RTE_PTYPE_L3_IPV4_EXT,
1571 RTE_PTYPE_L3_IPV6_EXT,
1575 RTE_PTYPE_TUNNEL_IP,
1576 RTE_PTYPE_INNER_L3_IPV6,
1577 RTE_PTYPE_INNER_L3_IPV6_EXT,
1578 RTE_PTYPE_INNER_L4_TCP,
1579 RTE_PTYPE_INNER_L4_UDP,
1587 eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1589 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1590 uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
1593 /* if extended VLAN has been enabled */
1594 if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
1595 frame_size += VLAN_TAG_SIZE;
1598 * If the device is started, refuse an mtu that requires the support of
1599 * scattered packets when this feature has not been enabled before.
1601 if (dev->data->dev_started && !dev->data->scattered_rx &&
1602 frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
1603 PMD_INIT_LOG(ERR, "Stop port first.");
1607 rctl = IGC_READ_REG(hw, IGC_RCTL);
1608 if (mtu > RTE_ETHER_MTU)
1609 rctl |= IGC_RCTL_LPE;
1611 rctl &= ~IGC_RCTL_LPE;
1612 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
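	/* program the maximum frame length the hardware will accept (RLPML) */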
1614 IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
1620 eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1621 uint32_t index, uint32_t pool)
1623 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1625 igc_rar_set(hw, mac_addr->addr_bytes, index);
1631 eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
1633 uint8_t addr[RTE_ETHER_ADDR_LEN];
1634 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1636 memset(addr, 0, sizeof(addr));
1637 igc_rar_set(hw, addr, index);
1641 eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
1642 struct rte_ether_addr *addr)
1644 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1645 igc_rar_set(hw, addr->addr_bytes, 0);
1650 eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
1651 struct rte_ether_addr *mc_addr_set,
1652 uint32_t nb_mc_addr)
1654 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1655 igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
1660 * Read hardware registers
1663 igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)
1667 uint64_t old_gprc = stats->gprc;
1668 uint64_t old_gptc = stats->gptc;
1669 uint64_t old_tpr = stats->tpr;
1670 uint64_t old_tpt = stats->tpt;
1671 uint64_t old_rpthc = stats->rpthc;
1672 uint64_t old_hgptc = stats->hgptc;
1674 stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);
1675 stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
1676 stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
1677 stats->mpc += IGC_READ_REG(hw, IGC_MPC);
1678 stats->scc += IGC_READ_REG(hw, IGC_SCC);
1679 stats->ecol += IGC_READ_REG(hw, IGC_ECOL);
1681 stats->mcc += IGC_READ_REG(hw, IGC_MCC);
1682 stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
1683 stats->colc += IGC_READ_REG(hw, IGC_COLC);
1685 stats->dc += IGC_READ_REG(hw, IGC_DC);
1686 stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
1687 stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
1688 stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
1689 stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
1690 stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);
1693 * For watchdog management we need to know if we have been
1694 * paused during the last interval, so capture that here.
1696 pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);
1697 stats->xoffrxc += pause_frames;
1698 stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);
1699 stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);
1700 stats->prc64 += IGC_READ_REG(hw, IGC_PRC64);
1701 stats->prc127 += IGC_READ_REG(hw, IGC_PRC127);
1702 stats->prc255 += IGC_READ_REG(hw, IGC_PRC255);
1703 stats->prc511 += IGC_READ_REG(hw, IGC_PRC511);
1704 stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);
1705 stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);
1706 stats->gprc += IGC_READ_REG(hw, IGC_GPRC);
1707 stats->bprc += IGC_READ_REG(hw, IGC_BPRC);
1708 stats->mprc += IGC_READ_REG(hw, IGC_MPRC);
1709 stats->gptc += IGC_READ_REG(hw, IGC_GPTC);
1711 /* For the 64-bit byte counters the low dword must be read first. */
1712 /* Both registers clear on the read of the high dword */
1714 /* Workaround: CRC bytes are included in the size, take away 4 bytes/packet */
1715 stats->gorc += IGC_READ_REG(hw, IGC_GORCL);
1716 stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);
1717 stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
1718 stats->gotc += IGC_READ_REG(hw, IGC_GOTCL);
1719 stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);
1720 stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;
1722 stats->rnbc += IGC_READ_REG(hw, IGC_RNBC);
1723 stats->ruc += IGC_READ_REG(hw, IGC_RUC);
1724 stats->rfc += IGC_READ_REG(hw, IGC_RFC);
1725 stats->roc += IGC_READ_REG(hw, IGC_ROC);
1726 stats->rjc += IGC_READ_REG(hw, IGC_RJC);
1728 stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);
1729 stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);
1730 stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);
1731 stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);
1732 stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);
1733 stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);
1734 stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);
1736 stats->tpr += IGC_READ_REG(hw, IGC_TPR);
1737 stats->tpt += IGC_READ_REG(hw, IGC_TPT);
1739 stats->tor += IGC_READ_REG(hw, IGC_TORL);
1740 stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);
1741 stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
1742 stats->tot += IGC_READ_REG(hw, IGC_TOTL);
1743 stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);
1744 stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;
1746 stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);
1747 stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);
1748 stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);
1749 stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);
1750 stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);
1751 stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);
1752 stats->mptc += IGC_READ_REG(hw, IGC_MPTC);
1753 stats->bptc += IGC_READ_REG(hw, IGC_BPTC);
1754 stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);
1756 stats->iac += IGC_READ_REG(hw, IGC_IAC);
1757 stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);
1758 stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);
1759 stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);
1761 /* Host to Card Statistics */
1762 stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);
1763 stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);
1764 stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
1765 stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);
1766 stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);
1767 stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
1768 stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);
1772 * Write 0 to all queue statistics registers
1775 igc_reset_queue_stats_register(struct igc_hw *hw)
1779 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1780 IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);
1781 IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);
1782 IGC_WRITE_REG(hw, IGC_PQGORC(i), 0);
1783 IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);
1784 IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);
1785 IGC_WRITE_REG(hw, IGC_RQDPC(i), 0);
1786 IGC_WRITE_REG(hw, IGC_TQDPC(i), 0);
1791 * Read all hardware queue statistics registers
1794 igc_read_queue_stats_register(struct rte_eth_dev *dev)
1796 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1797 struct igc_hw_queue_stats *queue_stats =
1798 IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1802 * This register is not cleared on read. Furthermore, the register wraps
1803 * around back to 0x00000000 on the next increment when reaching a value
1804 * of 0xFFFFFFFF and then continues normal count operation.
1806 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1814 * Read the register first; if the value is smaller than the
1815 * previous read, the register has wrapped around,
1816 * so we add 1 to the high 4 bytes and replace the low 4
1817 * bytes with the new value.
1819 tmp = IGC_READ_REG(hw, IGC_PQGPRC(i));
1820 value.ddword = queue_stats->pqgprc[i];
1821 if (value.dword[U32_0_IN_U64] > tmp)
1822 value.dword[U32_1_IN_U64]++;
1823 value.dword[U32_0_IN_U64] = tmp;
1824 queue_stats->pqgprc[i] = value.ddword;
1826 tmp = IGC_READ_REG(hw, IGC_PQGPTC(i));
1827 value.ddword = queue_stats->pqgptc[i];
1828 if (value.dword[U32_0_IN_U64] > tmp)
1829 value.dword[U32_1_IN_U64]++;
1830 value.dword[U32_0_IN_U64] = tmp;
1831 queue_stats->pqgptc[i] = value.ddword;
1833 tmp = IGC_READ_REG(hw, IGC_PQGORC(i));
1834 value.ddword = queue_stats->pqgorc[i];
1835 if (value.dword[U32_0_IN_U64] > tmp)
1836 value.dword[U32_1_IN_U64]++;
1837 value.dword[U32_0_IN_U64] = tmp;
1838 queue_stats->pqgorc[i] = value.ddword;
1840 tmp = IGC_READ_REG(hw, IGC_PQGOTC(i));
1841 value.ddword = queue_stats->pqgotc[i];
1842 if (value.dword[U32_0_IN_U64] > tmp)
1843 value.dword[U32_1_IN_U64]++;
1844 value.dword[U32_0_IN_U64] = tmp;
1845 queue_stats->pqgotc[i] = value.ddword;
1847 tmp = IGC_READ_REG(hw, IGC_PQMPRC(i));
1848 value.ddword = queue_stats->pqmprc[i];
1849 if (value.dword[U32_0_IN_U64] > tmp)
1850 value.dword[U32_1_IN_U64]++;
1851 value.dword[U32_0_IN_U64] = tmp;
1852 queue_stats->pqmprc[i] = value.ddword;
1854 tmp = IGC_READ_REG(hw, IGC_RQDPC(i));
1855 value.ddword = queue_stats->rqdpc[i];
1856 if (value.dword[U32_0_IN_U64] > tmp)
1857 value.dword[U32_1_IN_U64]++;
1858 value.dword[U32_0_IN_U64] = tmp;
1859 queue_stats->rqdpc[i] = value.ddword;
1861 tmp = IGC_READ_REG(hw, IGC_TQDPC(i));
1862 value.ddword = queue_stats->tqdpc[i];
1863 if (value.dword[U32_0_IN_U64] > tmp)
1864 value.dword[U32_1_IN_U64]++;
1865 value.dword[U32_0_IN_U64] = tmp;
1866 queue_stats->tqdpc[i] = value.ddword;
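/*
 * A minimal sketch (not part of the original driver) of the wrap-handling
 * pattern repeated above, factored into one helper. The helper name is
 * hypothetical; it relies on the same union layout and the U32_*_IN_U64
 * indexes defined at the top of this file.
 */
static inline void
igc_extend_queue_counter(uint64_t *counter, uint32_t reg_value)
{
	union {
		uint64_t ddword;
		uint32_t dword[2];
	} value;

	value.ddword = *counter;
	/* a hardware value below the stored low dword means the register wrapped */
	if (value.dword[U32_0_IN_U64] > reg_value)
		value.dword[U32_1_IN_U64]++;
	value.dword[U32_0_IN_U64] = reg_value;
	*counter = value.ddword;
}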
1871 eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
1873 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
1874 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1875 struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
1876 struct igc_hw_queue_stats *queue_stats =
1877 IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1881 * Cancel the statistics handler, since it also reads the queue statistics registers
1883 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
1885 /* Read status register */
1886 igc_read_queue_stats_register(dev);
1887 igc_read_stats_registers(hw, stats);
1889 if (rte_stats == NULL) {
1890 /* Restart queue status handler */
1891 rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1892 igc_update_queue_stats_handler, dev);
1897 rte_stats->imissed = stats->mpc;
1898 rte_stats->ierrors = stats->crcerrs + stats->rlec +
1899 stats->rxerrc + stats->algnerrc;
1902 rte_stats->oerrors = stats->ecol + stats->latecol;
1904 rte_stats->ipackets = stats->gprc;
1905 rte_stats->opackets = stats->gptc;
1906 rte_stats->ibytes = stats->gorc;
1907 rte_stats->obytes = stats->gotc;
1909 /* Get per-queue statistics */
1910 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1911 /* Get TX queue statistics */
1912 int map_id = igc->txq_stats_map[i];
1914 rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
1915 rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
1917 /* Get RX queue statistics */
1918 map_id = igc->rxq_stats_map[i];
1920 rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
1921 rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
1922 rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
1926 /* Restart queue status handler */
1927 rte_eal_alarm_set(IGC_ALARM_INTERVAL,
1928 igc_update_queue_stats_handler, dev);
1933 eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1936 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1937 struct igc_hw_stats *hw_stats =
1938 IGC_DEV_PRIVATE_STATS(dev);
1941 igc_read_stats_registers(hw, hw_stats);
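	/* per the ethdev convention, a too-small buffer just gets the required size back */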
1943 if (n < IGC_NB_XSTATS)
1944 return IGC_NB_XSTATS;
1946 /* If this is a reset, xstats is NULL and we have cleared the
1947 * registers by reading them.
1952 /* Extended stats */
1953 for (i = 0; i < IGC_NB_XSTATS; i++) {
1955 xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
1956 rte_igc_stats_strings[i].offset);
1959 return IGC_NB_XSTATS;
1963 eth_igc_xstats_reset(struct rte_eth_dev *dev)
1965 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1966 struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
1967 struct igc_hw_queue_stats *queue_stats =
1968 IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1970 /* Cancel the queue statistics handler to avoid conflicts */
1971 rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);
1973 /* HW registers are cleared on read */
1974 igc_reset_queue_stats_register(hw);
1975 igc_read_stats_registers(hw, hw_stats);
1977 /* Reset software totals */
1978 memset(hw_stats, 0, sizeof(*hw_stats));
1979 memset(queue_stats, 0, sizeof(*queue_stats));
1981 /* Restart the queue status handler */
1982 rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
1989 eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1990 struct rte_eth_xstat_name *xstats_names, unsigned int size)
1994 if (xstats_names == NULL)
1995 return IGC_NB_XSTATS;
1997 if (size < IGC_NB_XSTATS) {
1998 PMD_DRV_LOG(ERR, "not enough buffers!");
1999 return IGC_NB_XSTATS;
2002 for (i = 0; i < IGC_NB_XSTATS; i++)
2003 strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
2004 sizeof(xstats_names[i].name));
2006 return IGC_NB_XSTATS;
2010 eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
2011 const uint64_t *ids, struct rte_eth_xstat_name *xstats_names,
2017 return eth_igc_xstats_get_names(dev, xstats_names, limit);
2019 for (i = 0; i < limit; i++) {
2020 if (ids[i] >= IGC_NB_XSTATS) {
2021 PMD_DRV_LOG(ERR, "id value isn't valid");
2024 strlcpy(xstats_names[i].name,
2025 rte_igc_stats_strings[ids[i]].name,
2026 sizeof(xstats_names[i].name));
2032 eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
2033 uint64_t *values, unsigned int n)
2035 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2036 struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
2039 igc_read_stats_registers(hw, hw_stats);
2042 if (n < IGC_NB_XSTATS)
2043 return IGC_NB_XSTATS;
2045 /* If this is a reset, xstats is NULL and we have cleared the
2046 * registers by reading them.
2051 /* Extended stats */
2052 for (i = 0; i < IGC_NB_XSTATS; i++)
2053 values[i] = *(uint64_t *)(((char *)hw_stats) +
2054 rte_igc_stats_strings[i].offset);
2056 return IGC_NB_XSTATS;
2059 for (i = 0; i < n; i++) {
2060 if (ids[i] >= IGC_NB_XSTATS) {
2061 PMD_DRV_LOG(ERR, "id value isn't valid");
2064 values[i] = *(uint64_t *)(((char *)hw_stats) +
2065 rte_igc_stats_strings[ids[i]].offset);
2072 eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
2073 uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
2075 struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
2077 /* check queue id is valid */
2078 if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
2079 PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
2080 queue_id, IGC_QUEUE_PAIRS_NUM - 1);
2084 /* store the queue-to-statistics-index mapping */
2086 igc->rxq_stats_map[queue_id] = stat_idx;
2088 igc->txq_stats_map[queue_id] = stat_idx;
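/*
 * Note: this callback is reached through the ethdev API, e.g.
 * rte_eth_dev_set_tx_queue_stats_mapping(port_id, queue_id, stat_idx);
 * the mapping recorded here is applied when per-queue totals are folded
 * into rte_eth_stats in eth_igc_stats_get().
 */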
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMC, mask);
	IGC_WRITE_FLUSH(hw);

	return 0;
}

static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMS, mask);
	IGC_WRITE_FLUSH(hw);

	rte_intr_enable(intr_handle);

	return 0;
}

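/*
 * Illustrative usage sketch: with Rx-queue interrupts enabled in the
 * device configuration (intr_conf.rxq = 1), an application can arm a
 * queue interrupt before sleeping on its event fd and disarm it when
 * it resumes polling. Hypothetical port_id/queue_id; error handling
 * elided.
 *
 *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
 *	... block in rte_epoll_wait() on the queue's event fd ...
 *	rte_eth_dev_rx_intr_disable(port_id, queue_id);
 */
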
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl;
	int rx_pause, tx_pause;

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to actual setting of
	 * the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = IGC_READ_REG(hw, IGC_CTRL);
	if (ctrl & IGC_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & IGC_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_ETH_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_ETH_FC_NONE;

	return 0;
}

static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;
	int err;

	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;

	rx_buf_size = igc_get_rx_buffer_size(hw);
	PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* Reserve at least one Ethernet frame for the high watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if (fc_conf->high_water > max_high_water ||
		fc_conf->high_water < fc_conf->low_water) {
		PMD_DRV_LOG(ERR,
			"Incorrect high(%u)/low(%u) water value, max is %u",
			fc_conf->high_water, fc_conf->low_water,
			max_high_water);
		return -EINVAL;
	}

	switch (fc_conf->mode) {
	case RTE_ETH_FC_NONE:
		hw->fc.requested_mode = igc_fc_none;
		break;
	case RTE_ETH_FC_RX_PAUSE:
		hw->fc.requested_mode = igc_fc_rx_pause;
		break;
	case RTE_ETH_FC_TX_PAUSE:
		hw->fc.requested_mode = igc_fc_tx_pause;
		break;
	case RTE_ETH_FC_FULL:
		hw->fc.requested_mode = igc_fc_full;
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
		return -EINVAL;
	}

	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = igc_setup_link_generic(hw);
	if (err == IGC_SUCCESS) {
		/*
		 * Check whether MAC control frames should be forwarded - the
		 * driver has no native capability for that, so write the
		 * registers directly.
		 */
		rctl = IGC_READ_REG(hw, IGC_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= IGC_RCTL_PMCF;
		else
			rctl &= ~IGC_RCTL_PMCF;

		IGC_WRITE_REG(hw, IGC_RCTL, rctl);
		IGC_WRITE_FLUSH(hw);

		return 0;
	}

	PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
	return -EIO;
}

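/*
 * Illustrative usage sketch: enabling full (Rx + Tx) pause-frame flow
 * control from an application, assuming a hypothetical port_id and the
 * watermark constraints checked above; error handling elided.
 *
 *	struct rte_eth_fc_conf fc_conf;
 *	rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
 *	fc_conf.mode = RTE_ETH_FC_FULL;
 *	fc_conf.pause_time = IGC_FC_PAUSE_TIME;
 *	rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
 */
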
static int
eth_igc_rss_reta_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
	uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured (%d) doesn't match the number of entries supported by hardware (%d)",
			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* set redirection table */
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta, reg;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* skip this register if there is nothing to update */
		if (!mask ||
		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* check whether the old register value must be read first */
		if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
			reg.dword = 0;
		else
			reg.dword = IGC_READ_REG_LE_VALUE(hw,
					IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));

		/* update the register */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta.bytes[j] =
					(uint8_t)reta_conf[idx].reta[shift + j];
			else
				reta.bytes[j] = reg.bytes[j];
		}
		IGC_WRITE_REG_LE_VALUE(hw,
			IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
	}

	return 0;
}

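/*
 * Illustrative usage sketch: spreading all 128 redirection-table
 * entries round-robin across 4 queues, assuming a hypothetical
 * port_id; error handling elided.
 *
 *	struct rte_eth_rss_reta_entry64 reta[RTE_ETH_RSS_RETA_SIZE_128 /
 *			RTE_ETH_RETA_GROUP_SIZE];
 *	memset(reta, 0, sizeof(reta));
 *	for (int k = 0; k < RTE_ETH_RSS_RETA_SIZE_128; k++) {
 *		reta[k / RTE_ETH_RETA_GROUP_SIZE].mask |=
 *			1ull << (k % RTE_ETH_RETA_GROUP_SIZE);
 *		reta[k / RTE_ETH_RETA_GROUP_SIZE].reta[k %
 *			RTE_ETH_RETA_GROUP_SIZE] = k % 4;
 *	}
 *	rte_eth_dev_rss_reta_update(port_id, reta,
 *			RTE_ETH_RSS_RETA_SIZE_128);
 */
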
static int
eth_igc_rss_reta_query(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
	uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != RTE_ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured (%d) doesn't match the number of entries supported by hardware (%d)",
			reta_size, RTE_ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	RTE_BUILD_BUG_ON(RTE_ETH_RSS_RETA_SIZE_128 % IGC_RSS_RDT_REG_SIZE);

	/* read redirection table */
	for (i = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_ETH_RETA_GROUP_SIZE;
		shift = i % RTE_ETH_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* skip this register if there is nothing to read */
		if (!mask ||
		    shift > (RTE_ETH_RETA_GROUP_SIZE - IGC_RSS_RDT_REG_SIZE))
			continue;

		/* read the register and extract the queue indices */
		RTE_BUILD_BUG_ON(sizeof(reta.bytes) != IGC_RSS_RDT_REG_SIZE);
		reta.dword = IGC_READ_REG_LE_VALUE(hw,
				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta_conf[idx].reta[shift + j] = reta.bytes[j];
		}
	}

	return 0;
}

static int
eth_igc_rss_hash_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_hw_rss_hash_set(hw, rss_conf);
	return 0;
}

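/*
 * Illustrative usage sketch: restricting the RSS hash input to
 * IPv4/TCP while keeping the programmed key, assuming a hypothetical
 * port_id (rss_key = NULL leaves the key untouched).
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = NULL,
 *		.rss_hf = RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP,
 *	};
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */
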
static int
eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t rss_hf;

	if (hash_key != NULL) {
		uint8_t i;

		/* the buffer must match the hardware hash key size */
		if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
			PMD_DRV_LOG(ERR,
				"RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
				rss_conf->rss_key_len, IGC_HKEY_SIZE);
			return -EINVAL;
		}

		/* read RSS key from register */
		for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
			hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
	}

	/* get RSS functions configured in MRQC register */
	mrqc = IGC_READ_REG(hw, IGC_MRQC);
	if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
		return 0;

	rss_hf = 0;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
		rss_hf |= RTE_ETH_RSS_IPV4;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
		rss_hf |= RTE_ETH_RSS_IPV6;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
		rss_hf |= RTE_ETH_RSS_IPV6_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
		rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
		rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
		rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;

	rss_conf->rss_hf |= rss_hf;
	return 0;
}

static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t vfta, vid_idx, vid_bit;

	vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
	vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
	vfta = shadow_vfta->vfta[vid_idx];
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;

	return 0;
}

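/*
 * Illustrative usage sketch (hypothetical port_id; error handling
 * elided): with RTE_ETH_RX_OFFLOAD_VLAN_FILTER enabled at configure
 * time, an application admits or drops specific VLAN IDs through the
 * generic API:
 *
 *	rte_eth_dev_vlan_filter(port_id, 100, 1);	accept VID 100
 *	rte_eth_dev_vlan_filter(port_id, 100, 0);	drop VID 100 again
 */
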
static void
igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_clear_bits(hw, IGC_RCTL,
			IGC_RCTL_CFIEN | IGC_RCTL_VFE);
}

static void
igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t reg_val;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg_val = IGC_READ_REG(hw, IGC_RCTL);
	reg_val &= ~IGC_RCTL_CFIEN;
	reg_val |= IGC_RCTL_VFE;
	IGC_WRITE_REG(hw, IGC_RCTL, reg_val);

	/* restore the VFTA table from the shadow copy */
	for (i = 0; i < IGC_VFTA_SIZE; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
}

static void
igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static void
igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}

static int
igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* nothing to do if extended VLAN is already disabled */
	if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
		return 0;

	/* Update maximum packet length */
	if (frame_size < RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
			frame_size, VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
		return -EINVAL;
	}
	IGC_WRITE_REG(hw, IGC_RLPML, frame_size - VLAN_TAG_SIZE);

	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

static int
igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* nothing to do if extended VLAN is already enabled */
	if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
		return 0;

	/* Update maximum packet length */
	if (frame_size > MAX_RX_JUMBO_FRAME_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
			frame_size, MAX_RX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	IGC_WRITE_REG(hw, IGC_RLPML, frame_size);

	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}

static int
eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & RTE_ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			igc_vlan_hw_strip_enable(dev);
		else
			igc_vlan_hw_strip_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
			igc_vlan_hw_filter_enable(dev);
		else
			igc_vlan_hw_filter_disable(dev);
	}

	if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
			return igc_vlan_hw_extend_enable(dev);
		else
			return igc_vlan_hw_extend_disable(dev);
	}

	return 0;
}

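/*
 * Illustrative usage sketch: toggling VLAN stripping at runtime,
 * assuming a hypothetical port_id. Offload bits absent from the mask
 * passed to the generic API are left unchanged.
 *
 *	int flags = rte_eth_dev_get_vlan_offload(port_id);
 *	flags |= RTE_ETH_VLAN_STRIP_OFFLOAD;
 *	rte_eth_dev_set_vlan_offload(port_id, flags);
 */
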
static int
eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
	enum rte_vlan_type vlan_type,
	uint16_t tpid)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t reg_val;

	/* only the outer TPID of double VLAN can be configured */
	if (vlan_type == RTE_ETH_VLAN_TYPE_OUTER) {
		reg_val = IGC_READ_REG(hw, IGC_VET);
		reg_val = (reg_val & (~IGC_VET_EXT)) |
			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
		IGC_WRITE_REG(hw, IGC_VET, reg_val);

		return 0;
	}

	/* all other TPID values are read-only */
	PMD_DRV_LOG(ERR, "Not supported");
	return -ENOTSUP;
}

static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct igc_adapter), eth_igc_dev_init);
}

static int
eth_igc_pci_remove(struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
}

static struct rte_pci_driver rte_igc_pmd = {
	.id_table = pci_id_igc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igc_pci_probe,
	.remove = eth_igc_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");