/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation
 */

#include <rte_string_fns.h>
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_malloc.h>
#include <rte_alarm.h>
#define IGC_INTEL_VENDOR_ID		0x8086

/*
 * The overhead from MTU to max frame size.
 * Considering VLAN, so one tag needs to be counted.
 */
#define IGC_ETH_OVERHEAD	(RTE_ETHER_HDR_LEN + \
				RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
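/*
 * Worked example: with RTE_ETHER_HDR_LEN = 14, RTE_ETHER_CRC_LEN = 4 and
 * VLAN_TAG_SIZE = 4, IGC_ETH_OVERHEAD is 22 bytes, so the default MTU of
 * 1500 maps to a max frame size of 1522 bytes.
 */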
#define IGC_FC_PAUSE_TIME		0x0680
#define IGC_LINK_UPDATE_CHECK_TIMEOUT	90  /* 9s */
#define IGC_LINK_UPDATE_CHECK_INTERVAL	100 /* ms */

#define IGC_MISC_VEC_ID			RTE_INTR_VEC_ZERO_OFFSET
#define IGC_RX_VEC_START		RTE_INTR_VEC_RXTX_OFFSET
#define IGC_MSIX_OTHER_INTR_VEC		0   /* MSI-X other interrupt vector */
#define IGC_FLAG_NEED_LINK_UPDATE	(1u << 0)	/* need update link */

#define IGC_DEFAULT_RX_FREE_THRESH	32
#define IGC_DEFAULT_RX_PTHRESH		8
#define IGC_DEFAULT_RX_HTHRESH		8
#define IGC_DEFAULT_RX_WTHRESH		4

#define IGC_DEFAULT_TX_PTHRESH		8
#define IGC_DEFAULT_TX_HTHRESH		1
#define IGC_DEFAULT_TX_WTHRESH		16
/* External VLAN Enable bit mask */
#define IGC_CTRL_EXT_EXT_VLAN		(1u << 26)

/* External VLAN Ether Type bit mask and shift */
#define IGC_VET_EXT			0xFFFF0000
#define IGC_VET_EXT_SHIFT		16
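/*
 * Illustrative use of the mask/shift pair above (not from the original
 * driver): the outer VLAN ether type occupies the upper 16 bits of the
 * VET register, so it can be read back as
 *	tpid = (IGC_READ_REG(hw, IGC_VET) & IGC_VET_EXT) >> IGC_VET_EXT_SHIFT;
 */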
/* Per Queue Good Packets Received Count */
#define IGC_PQGPRC(idx)		(0x10010 + 0x100 * (idx))
/* Per Queue Good Octets Received Count */
#define IGC_PQGORC(idx)		(0x10018 + 0x100 * (idx))
/* Per Queue Good Octets Transmitted Count */
#define IGC_PQGOTC(idx)		(0x10034 + 0x100 * (idx))
/* Per Queue Multicast Packets Received Count */
#define IGC_PQMPRC(idx)		(0x10038 + 0x100 * (idx))
/* Transmit Queue Drop Packet Count */
#define IGC_TQDPC(idx)		(0xe030 + 0x40 * (idx))
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define U32_0_IN_U64		0	/* lower bytes of u64 */
#define U32_1_IN_U64		1	/* higher bytes of u64 */
#else
#define U32_0_IN_U64		1
#define U32_1_IN_U64		0
#endif
/*
 * Alarm period in microseconds; after about 13.6s some per-queue registers
 * wrap around back to 0.
 */
#define IGC_ALARM_INTERVAL	8000000u
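/*
 * Worked arithmetic behind the 13.6s figure: a 32-bit byte counter at the
 * 2.5Gb/s line rate advances by at most 2.5e9 / 8 = 312.5 MB/s, and
 * 2^32 / 312.5e6 is roughly 13.7s until wraparound, so polling every 8s
 * observes each register at least once per wrap.
 */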
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = IGC_MAX_RXD,
	.nb_min = IGC_MIN_RXD,
	.nb_align = IGC_RXD_ALIGN,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = IGC_MAX_TXD,
	.nb_min = IGC_MIN_TXD,
	.nb_align = IGC_TXD_ALIGN,
	.nb_seg_max = IGC_TX_MAX_SEG,
	.nb_mtu_seg_max = IGC_TX_MAX_MTU_SEG,
};
static const struct rte_pci_id pci_id_igc_map[] = {
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_LM) },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_V)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_I)  },
	{ RTE_PCI_DEVICE(IGC_INTEL_VENDOR_ID, IGC_DEV_ID_I225_K)  },
	{ .vendor_id = 0, /* sentinel */ },
};
/* store statistics names and their offsets in the stats structure */
struct rte_igc_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};
static const struct rte_igc_xstats_name_off rte_igc_stats_strings[] = {
	{"rx_crc_errors", offsetof(struct igc_hw_stats, crcerrs)},
	{"rx_align_errors", offsetof(struct igc_hw_stats, algnerrc)},
	{"rx_errors", offsetof(struct igc_hw_stats, rxerrc)},
	{"rx_missed_packets", offsetof(struct igc_hw_stats, mpc)},
	{"tx_single_collision_packets", offsetof(struct igc_hw_stats, scc)},
	{"tx_multiple_collision_packets", offsetof(struct igc_hw_stats, mcc)},
	{"tx_excessive_collision_packets", offsetof(struct igc_hw_stats,
		ecol)},
	{"tx_late_collisions", offsetof(struct igc_hw_stats, latecol)},
	{"tx_total_collisions", offsetof(struct igc_hw_stats, colc)},
	{"tx_deferred_packets", offsetof(struct igc_hw_stats, dc)},
	{"tx_no_carrier_sense_packets", offsetof(struct igc_hw_stats, tncrs)},
	{"tx_discarded_packets", offsetof(struct igc_hw_stats, htdpmc)},
	{"rx_length_errors", offsetof(struct igc_hw_stats, rlec)},
	{"rx_xon_packets", offsetof(struct igc_hw_stats, xonrxc)},
	{"tx_xon_packets", offsetof(struct igc_hw_stats, xontxc)},
	{"rx_xoff_packets", offsetof(struct igc_hw_stats, xoffrxc)},
	{"tx_xoff_packets", offsetof(struct igc_hw_stats, xofftxc)},
	{"rx_flow_control_unsupported_packets", offsetof(struct igc_hw_stats,
		fcruc)},
	{"rx_size_64_packets", offsetof(struct igc_hw_stats, prc64)},
	{"rx_size_65_to_127_packets", offsetof(struct igc_hw_stats, prc127)},
	{"rx_size_128_to_255_packets", offsetof(struct igc_hw_stats, prc255)},
	{"rx_size_256_to_511_packets", offsetof(struct igc_hw_stats, prc511)},
	{"rx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		prc1023)},
	{"rx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		prc1522)},
	{"rx_broadcast_packets", offsetof(struct igc_hw_stats, bprc)},
	{"rx_multicast_packets", offsetof(struct igc_hw_stats, mprc)},
	{"rx_undersize_errors", offsetof(struct igc_hw_stats, ruc)},
	{"rx_fragment_errors", offsetof(struct igc_hw_stats, rfc)},
	{"rx_oversize_errors", offsetof(struct igc_hw_stats, roc)},
	{"rx_jabber_errors", offsetof(struct igc_hw_stats, rjc)},
	{"rx_no_buffers", offsetof(struct igc_hw_stats, rnbc)},
	{"rx_management_packets", offsetof(struct igc_hw_stats, mgprc)},
	{"rx_management_dropped", offsetof(struct igc_hw_stats, mgpdc)},
	{"tx_management_packets", offsetof(struct igc_hw_stats, mgptc)},
	{"rx_total_packets", offsetof(struct igc_hw_stats, tpr)},
	{"tx_total_packets", offsetof(struct igc_hw_stats, tpt)},
	{"rx_total_bytes", offsetof(struct igc_hw_stats, tor)},
	{"tx_total_bytes", offsetof(struct igc_hw_stats, tot)},
	{"tx_size_64_packets", offsetof(struct igc_hw_stats, ptc64)},
	{"tx_size_65_to_127_packets", offsetof(struct igc_hw_stats, ptc127)},
	{"tx_size_128_to_255_packets", offsetof(struct igc_hw_stats, ptc255)},
	{"tx_size_256_to_511_packets", offsetof(struct igc_hw_stats, ptc511)},
	{"tx_size_512_to_1023_packets", offsetof(struct igc_hw_stats,
		ptc1023)},
	{"tx_size_1024_to_max_packets", offsetof(struct igc_hw_stats,
		ptc1522)},
	{"tx_multicast_packets", offsetof(struct igc_hw_stats, mptc)},
	{"tx_broadcast_packets", offsetof(struct igc_hw_stats, bptc)},
	{"tx_tso_packets", offsetof(struct igc_hw_stats, tsctc)},
	{"rx_sent_to_host_packets", offsetof(struct igc_hw_stats, rpthc)},
	{"tx_sent_by_host_packets", offsetof(struct igc_hw_stats, hgptc)},
	{"interrupt_assert_count", offsetof(struct igc_hw_stats, iac)},
	{"rx_descriptor_lower_threshold",
		offsetof(struct igc_hw_stats, icrxdmtc)},
};
#define IGC_NB_XSTATS (sizeof(rte_igc_stats_strings) / \
		sizeof(rte_igc_stats_strings[0]))
static int eth_igc_configure(struct rte_eth_dev *dev);
static int eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void eth_igc_stop(struct rte_eth_dev *dev);
static int eth_igc_start(struct rte_eth_dev *dev);
static int eth_igc_set_link_up(struct rte_eth_dev *dev);
static int eth_igc_set_link_down(struct rte_eth_dev *dev);
static void eth_igc_close(struct rte_eth_dev *dev);
static int eth_igc_reset(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_enable(struct rte_eth_dev *dev);
static int eth_igc_promiscuous_disable(struct rte_eth_dev *dev);
static int eth_igc_fw_version_get(struct rte_eth_dev *dev,
		char *fw_version, size_t fw_size);
static int eth_igc_infos_get(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info);
static int eth_igc_led_on(struct rte_eth_dev *dev);
static int eth_igc_led_off(struct rte_eth_dev *dev);
static const uint32_t *eth_igc_supported_ptypes_get(struct rte_eth_dev *dev);
static int eth_igc_rar_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *mac_addr, uint32_t index, uint32_t pool);
static void eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index);
static int eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr);
static int eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr);
static int eth_igc_allmulticast_enable(struct rte_eth_dev *dev);
static int eth_igc_allmulticast_disable(struct rte_eth_dev *dev);
static int eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static int eth_igc_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *rte_stats);
static int eth_igc_xstats_get(struct rte_eth_dev *dev,
		struct rte_eth_xstat *xstats, unsigned int n);
static int eth_igc_xstats_get_by_id(struct rte_eth_dev *dev,
		const uint64_t *ids,
		uint64_t *values, unsigned int n);
static int eth_igc_xstats_get_names(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names,
		unsigned int size);
static int eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
		struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
		unsigned int limit);
static int eth_igc_xstats_reset(struct rte_eth_dev *dev);
static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx);
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id);
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf);
static int eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size);
static int eth_igc_rss_hash_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
		struct rte_eth_rss_conf *rss_conf);
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
static int eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type, uint16_t tpid);
static const struct eth_dev_ops eth_igc_ops = {
	.dev_configure		= eth_igc_configure,
	.link_update		= eth_igc_link_update,
	.dev_stop		= eth_igc_stop,
	.dev_start		= eth_igc_start,
	.dev_close		= eth_igc_close,
	.dev_reset		= eth_igc_reset,
	.dev_set_link_up	= eth_igc_set_link_up,
	.dev_set_link_down	= eth_igc_set_link_down,
	.promiscuous_enable	= eth_igc_promiscuous_enable,
	.promiscuous_disable	= eth_igc_promiscuous_disable,
	.allmulticast_enable	= eth_igc_allmulticast_enable,
	.allmulticast_disable	= eth_igc_allmulticast_disable,
	.fw_version_get		= eth_igc_fw_version_get,
	.dev_infos_get		= eth_igc_infos_get,
	.dev_led_on		= eth_igc_led_on,
	.dev_led_off		= eth_igc_led_off,
	.dev_supported_ptypes_get = eth_igc_supported_ptypes_get,
	.mtu_set		= eth_igc_mtu_set,
	.mac_addr_add		= eth_igc_rar_set,
	.mac_addr_remove	= eth_igc_rar_clear,
	.mac_addr_set		= eth_igc_default_mac_addr_set,
	.set_mc_addr_list	= eth_igc_set_mc_addr_list,
	.rx_queue_setup		= eth_igc_rx_queue_setup,
	.rx_queue_release	= eth_igc_rx_queue_release,
	.rx_queue_count		= eth_igc_rx_queue_count,
	.rx_descriptor_done	= eth_igc_rx_descriptor_done,
	.rx_descriptor_status	= eth_igc_rx_descriptor_status,
	.tx_descriptor_status	= eth_igc_tx_descriptor_status,
	.tx_queue_setup		= eth_igc_tx_queue_setup,
	.tx_queue_release	= eth_igc_tx_queue_release,
	.tx_done_cleanup	= eth_igc_tx_done_cleanup,
	.rxq_info_get		= eth_igc_rxq_info_get,
	.txq_info_get		= eth_igc_txq_info_get,
	.stats_get		= eth_igc_stats_get,
	.xstats_get		= eth_igc_xstats_get,
	.xstats_get_by_id	= eth_igc_xstats_get_by_id,
	.xstats_get_names_by_id	= eth_igc_xstats_get_names_by_id,
	.xstats_get_names	= eth_igc_xstats_get_names,
	.stats_reset		= eth_igc_xstats_reset,
	.xstats_reset		= eth_igc_xstats_reset,
	.queue_stats_mapping_set = eth_igc_queue_stats_mapping_set,
	.rx_queue_intr_enable	= eth_igc_rx_queue_intr_enable,
	.rx_queue_intr_disable	= eth_igc_rx_queue_intr_disable,
	.flow_ctrl_get		= eth_igc_flow_ctrl_get,
	.flow_ctrl_set		= eth_igc_flow_ctrl_set,
	.reta_update		= eth_igc_rss_reta_update,
	.reta_query		= eth_igc_rss_reta_query,
	.rss_hash_update	= eth_igc_rss_hash_update,
	.rss_hash_conf_get	= eth_igc_rss_hash_conf_get,
	.vlan_filter_set	= eth_igc_vlan_filter_set,
	.vlan_offload_set	= eth_igc_vlan_offload_set,
	.vlan_tpid_set		= eth_igc_vlan_tpid_set,
	.vlan_strip_queue_set	= eth_igc_vlan_strip_queue_set,
};
/*
 * multiple queue mode checking
 */
static int
igc_check_mq_mode(struct rte_eth_dev *dev)
{
	enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
	enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;

	if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
		PMD_INIT_LOG(ERR, "SRIOV is not supported.");
		return -EINVAL;
	}

	if (rx_mq_mode != ETH_MQ_RX_NONE &&
		rx_mq_mode != ETH_MQ_RX_RSS) {
		/* RSS together with VMDq not supported */
		PMD_INIT_LOG(ERR, "RX mode %d is not supported.",
				rx_mq_mode);
		return -EINVAL;
	}

	/* To not break software that sets an invalid mode, only display
	 * a warning if an invalid mode is used.
	 */
	if (tx_mq_mode != ETH_MQ_TX_NONE)
		PMD_INIT_LOG(WARNING,
			"TX mode %d is not supported. Since it is meaningless in this driver, just ignore it",
			tx_mq_mode);

	return 0;
}
static int
eth_igc_configure(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = igc_check_mq_mode(dev);
	if (ret != 0)
		return ret;

	intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
	return 0;
}
static int
eth_igc_set_link_up(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_up_phy(hw);
	else
		igc_power_up_fiber_serdes_link(hw);
	return 0;
}

static int
eth_igc_set_link_down(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	if (hw->phy.media_type == igc_media_type_copper)
		igc_power_down_phy(hw);
	else
		igc_shutdown_fiber_serdes_link(hw);
	return 0;
}
/*
 * disable other interrupts
 */
static void
igc_intr_other_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMC, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMC, ~0);
}

/*
 * enable other interrupts
 */
static void
igc_intr_other_enable(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	if (rte_intr_allow_others(intr_handle) &&
		dev->data->dev_conf.intr_conf.lsc) {
		IGC_WRITE_REG(hw, IGC_EIMS, 1u << IGC_MSIX_OTHER_INTR_VEC);
	}

	IGC_WRITE_REG(hw, IGC_IMS, intr->mask);
}
/*
 * It reads ICR and gets interrupt causes, checks them and sets a bit flag
 * to update the link status.
 */
static void
eth_igc_interrupt_get_status(struct rte_eth_dev *dev)
{
	uint32_t icr;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	/* read-on-clear nic registers here */
	icr = IGC_READ_REG(hw, IGC_ICR);

	intr->flags = 0;
	if (icr & IGC_ICR_LSC)
		intr->flags |= IGC_FLAG_NEED_LINK_UPDATE;
}
/* return 0 means link status changed, -1 means not changed */
static int
eth_igc_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_eth_link link;
	int link_check, count;

	link_check = 0;
	hw->mac.get_link_status = 1;

	/* possible wait-to-complete in up to 9 seconds */
	for (count = 0; count < IGC_LINK_UPDATE_CHECK_TIMEOUT; count++) {
		/* Read the real link status */
		switch (hw->phy.media_type) {
		case igc_media_type_copper:
			/* Do the work to read phy */
			igc_check_for_link(hw);
			link_check = !hw->mac.get_link_status;
			break;

		case igc_media_type_fiber:
			igc_check_for_link(hw);
			link_check = (IGC_READ_REG(hw, IGC_STATUS) &
					IGC_STATUS_LU);
			break;

		case igc_media_type_internal_serdes:
			igc_check_for_link(hw);
			link_check = hw->mac.serdes_has_link;
			break;

		default:
			break;
		}
		if (link_check || wait_to_complete == 0)
			break;
		rte_delay_ms(IGC_LINK_UPDATE_CHECK_INTERVAL);
	}
	memset(&link, 0, sizeof(link));

	/* Now we check if a transition has happened */
	if (link_check) {
		uint16_t duplex, speed;
		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
		link.link_duplex = (duplex == FULL_DUPLEX) ?
				ETH_LINK_FULL_DUPLEX :
				ETH_LINK_HALF_DUPLEX;
		link.link_speed = speed;
		link.link_status = ETH_LINK_UP;
		link.link_autoneg = !(dev->data->dev_conf.link_speeds &
				ETH_LINK_SPEED_FIXED);

		if (speed == SPEED_2500) {
			uint32_t tipg = IGC_READ_REG(hw, IGC_TIPG);
			if ((tipg & IGC_TIPG_IPGT_MASK) != 0x0b) {
				tipg &= ~IGC_TIPG_IPGT_MASK;
				tipg |= 0x0b;
				IGC_WRITE_REG(hw, IGC_TIPG, tipg);
			}
		}
	} else {
		link.link_speed = 0;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
		link.link_status = ETH_LINK_DOWN;
		link.link_autoneg = ETH_LINK_FIXED;
	}

	return rte_eth_linkstatus_set(dev, &link);
}
/*
 * It executes link_update after knowing an interrupt is present.
 */
static void
eth_igc_interrupt_action(struct rte_eth_dev *dev)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;
	int ret;

	if (intr->flags & IGC_FLAG_NEED_LINK_UPDATE) {
		intr->flags &= ~IGC_FLAG_NEED_LINK_UPDATE;

		/* set get_link_status to check register later */
		ret = eth_igc_link_update(dev, 0);

		/* check if link has changed */
		if (ret < 0)
			return;

		rte_eth_linkstatus_get(dev, &link);
		if (link.link_status)
			PMD_DRV_LOG(INFO,
				" Port %d: Link Up - speed %u Mbps - %s",
				dev->data->port_id,
				(unsigned int)link.link_speed,
				link.link_duplex == ETH_LINK_FULL_DUPLEX ?
				"full-duplex" : "half-duplex");
		else
			PMD_DRV_LOG(INFO, " Port %d: Link Down",
				dev->data->port_id);

		PMD_DRV_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
				pci_dev->addr.domain,
				pci_dev->addr.bus,
				pci_dev->addr.devid,
				pci_dev->addr.function);
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
				NULL);
	}
}
/*
 * Interrupt handler which shall be registered at first.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
eth_igc_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	eth_igc_interrupt_get_status(dev);
	eth_igc_interrupt_action(dev);
}
static void igc_read_queue_stats_register(struct rte_eth_dev *dev);
/*
 * Update the queue status every IGC_ALARM_INTERVAL time.
 * @param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 */
static void
igc_update_queue_stats_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	igc_read_queue_stats_register(dev);
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
}
/*
 * rx,tx enable/disable
 */
static void
eth_igc_rxtx_control(struct rte_eth_dev *dev, bool enable)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t tctl, rctl;

	tctl = IGC_READ_REG(hw, IGC_TCTL);
	rctl = IGC_READ_REG(hw, IGC_RCTL);

	if (enable) {
		/* enable Tx/Rx */
		tctl |= IGC_TCTL_EN;
		rctl |= IGC_RCTL_EN;
	} else {
		/* disable Tx/Rx */
		tctl &= ~IGC_TCTL_EN;
		rctl &= ~IGC_RCTL_EN;
	}
	IGC_WRITE_REG(hw, IGC_TCTL, tctl);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	IGC_WRITE_FLUSH(hw);
}
/*
 * This routine disables all traffic on the adapter by issuing a
 * global reset on the MAC.
 */
static void
eth_igc_stop(struct rte_eth_dev *dev)
{
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct rte_eth_link link;

	adapter->stopped = 1;

	/* disable receive and transmit */
	eth_igc_rxtx_control(dev, false);

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	igc_intr_other_disable(dev);

	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* disable intr eventfd mapping */
	rte_intr_disable(intr_handle);

	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/* Set bit for Go Link disconnect */
	igc_read_reg_check_set_bits(hw, IGC_82580_PHY_POWER_MGMT,
			IGC_82580_PM_GO_LINKD);

	/* Power down the phy. Needed to make the link go Down */
	eth_igc_set_link_down(dev);

	igc_dev_clear_queues(dev);

	/* clear the recorded link status */
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(dev, &link);

	if (!rte_intr_allow_others(intr_handle))
		/* resume to the default handler */
		rte_intr_callback_register(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);

	/* Clean datapath event and queue/vec mapping */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec != NULL) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;
	}
}
/*
 * write interrupt vector allocation register
 * @hw
 *  board private structure
 * @queue_index
 *  queue index, valid 0,1,2,3
 * @tx
 *  tx:1, rx:0
 * @msix_vector
 *  msix-vector, valid 0,1,2,3,4
 */
static void
igc_write_ivar(struct igc_hw *hw, uint8_t queue_index,
		bool tx, uint8_t msix_vector)
{
	uint8_t offset = 0;
	uint8_t reg_index = queue_index >> 1;
	uint32_t val;

	/*
	 * IVAR(0)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX1		RX1		TX0		RX0
	 *
	 * IVAR(1)
	 * bit31...24	bit23...16	bit15...8	bit7...0
	 * TX3		RX3		TX2		RX2
	 */
	if (tx)
		offset = 8;

	if (queue_index & 1)
		offset += 16;

	val = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, reg_index);

	/* clear bits */
	val &= ~((uint32_t)0xFF << offset);

	/* write vector and valid bit */
	val |= (uint32_t)(msix_vector | IGC_IVAR_VALID) << offset;

	IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, reg_index, val);
}
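/*
 * Worked example of the layout above: queue_index 2 with tx = true and
 * msix_vector 3 selects reg_index 1 (IVAR(1)) and offset 8 (the TX2 byte),
 * so (3 | IGC_IVAR_VALID) is written to bits 15..8 of IVAR(1).
 */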
/*
 * Sets up the hardware to generate MSI-X interrupts properly
 * @hw
 *  board private structure
 */
static void
igc_configure_msix_intr(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;

	uint32_t intr_mask;
	uint32_t vec = IGC_MISC_VEC_ID;
	uint32_t base = IGC_MISC_VEC_ID;
	uint32_t misc_shift = 0;
	int i;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	if (rte_intr_allow_others(intr_handle)) {
		base = IGC_RX_VEC_START;
		vec = base;
		misc_shift = 1;
	}

	/* turn on MSI-X capability first */
	IGC_WRITE_REG(hw, IGC_GPIE, IGC_GPIE_MSIX_MODE |
			IGC_GPIE_PBA | IGC_GPIE_EIAME |
			IGC_GPIE_NSICR);
	intr_mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) <<
			misc_shift;

	if (dev->data->dev_conf.intr_conf.lsc)
		intr_mask |= (1u << IGC_MSIX_OTHER_INTR_VEC);

	/* enable msix auto-clear */
	igc_read_reg_check_set_bits(hw, IGC_EIAC, intr_mask);

	/* set other cause interrupt vector */
	igc_read_reg_check_set_bits(hw, IGC_IVAR_MISC,
		(uint32_t)(IGC_MSIX_OTHER_INTR_VEC | IGC_IVAR_VALID) << 8);

	/* enable auto-mask */
	igc_read_reg_check_set_bits(hw, IGC_EIAM, intr_mask);

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		igc_write_ivar(hw, i, 0, vec);
		intr_handle->intr_vec[i] = vec;
		if (vec < base + intr_handle->nb_efd - 1)
			vec++;
	}

	IGC_WRITE_FLUSH(hw);
}
/*
 * It enables the interrupt mask and then enables the interrupt.
 *
 * @dev
 *  Pointer to struct rte_eth_dev.
 * @on
 *  Enable or Disable
 */
static void
igc_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
{
	struct igc_interrupt *intr = IGC_DEV_PRIVATE_INTR(dev);

	if (on)
		intr->mask |= IGC_ICR_LSC;
	else
		intr->mask &= ~IGC_ICR_LSC;
}
/*
 * It enables the interrupt.
 * It will be called only once during nic initialization.
 */
static void
igc_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	uint32_t mask;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	int misc_shift = rte_intr_allow_others(intr_handle) ? 1 : 0;

	/* won't configure msix register if no mapping is done
	 * between intr vector and event fd
	 */
	if (!rte_intr_dp_is_en(intr_handle))
		return;

	mask = RTE_LEN2MASK(intr_handle->nb_efd, uint32_t) << misc_shift;
	IGC_WRITE_REG(hw, IGC_EIMS, mask);
}
/*
 * Get hardware rx-buffer size.
 */
static int
igc_get_rx_buffer_size(struct igc_hw *hw)
{
	return (IGC_READ_REG(hw, IGC_RXPBS) & 0x3f) << 10;
}
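/*
 * Worked example: the low 6 bits of RXPBS hold the Rx packet buffer size
 * in KB and "<< 10" converts KB to bytes, so a field value of 0x20 (32 KB)
 * yields 32768 bytes.
 */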
/*
 * igc_hw_control_acquire sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means
 * that the driver is loaded.
 */
static void
igc_hw_control_acquire(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}

/*
 * igc_hw_control_release resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 */
static void
igc_hw_control_release(struct igc_hw *hw)
{
	uint32_t ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
	IGC_WRITE_REG(hw, IGC_CTRL_EXT,
			ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
}
static int
igc_hardware_init(struct igc_hw *hw)
{
	uint32_t rx_buf_size;
	int diag;

	/* Let the firmware know the OS is in control */
	igc_hw_control_acquire(hw);

	/* Issue a global reset */
	igc_reset_hw(hw);

	/* disable all wake up */
	IGC_WRITE_REG(hw, IGC_WUC, 0);

	/*
	 * Hardware flow control
	 * - High water mark should allow for at least two standard size (1518)
	 *   frames to be received after sending an XOFF.
	 * - Low water mark works best when it is very near the high water mark.
	 *   This allows the receiver to restart by sending XON when it has
	 *   drained a bit. Here we use an arbitrary value of 1500 which will
	 *   restart after one full frame is pulled from the buffer. There
	 *   could be several smaller frames in the buffer and if so they will
	 *   not trigger the XON until their total number reduces the buffer
	 *   by 1500.
	 */
	rx_buf_size = igc_get_rx_buffer_size(hw);
	hw->fc.high_water = rx_buf_size - (RTE_ETHER_MAX_LEN * 2);
	hw->fc.low_water = hw->fc.high_water - 1500;
	hw->fc.pause_time = IGC_FC_PAUSE_TIME;
	hw->fc.send_xon = 1;
	hw->fc.requested_mode = igc_fc_full;
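	/*
	 * Worked example, assuming a 32 KB Rx packet buffer (rx_buf_size =
	 * 32768): high_water = 32768 - 2 * 1518 = 29732 and low_water =
	 * 29732 - 1500 = 28232 bytes, leaving room for two full-size frames
	 * after XOFF and resuming with XON once roughly one frame's worth of
	 * buffer has drained.
	 */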
	diag = igc_init_hw(hw);
	if (diag < 0)
		return diag;

	igc_get_phy_info(hw);
	igc_check_for_link(hw);

	return 0;
}
static int
eth_igc_start(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t *speeds;
	int ret;

	PMD_INIT_FUNC_TRACE();

	/* disable all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EIMC, 0x1f);
	IGC_WRITE_FLUSH(hw);

	/* clear all MSI-X interrupts */
	IGC_WRITE_REG(hw, IGC_EICR, 0x1f);

	/* disable uio/vfio intr/eventfd mapping */
	if (!adapter->stopped)
		rte_intr_disable(intr_handle);

	/* Power up the phy. Needed to make the link go Up */
	eth_igc_set_link_up(dev);

	/* Put the address into the Receive Address Array */
	igc_rar_set(hw, hw->mac.addr, 0);

	/* Initialize the hardware */
	if (igc_hardware_init(hw)) {
		PMD_DRV_LOG(ERR, "Unable to initialize the hardware");
		return -EIO;
	}
	adapter->stopped = 0;

	/* check and configure queue intr-vector mapping */
	if (rte_intr_cap_multiple(intr_handle) &&
		dev->data->dev_conf.intr_conf.rxq) {
		uint32_t intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;
	}

	if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
			dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec == NULL) {
			PMD_DRV_LOG(ERR,
				"Failed to allocate %d rx_queues intr_vec",
				dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	/* configure msix for rx interrupt */
	igc_configure_msix_intr(dev);

	igc_tx_init(dev);

	/* This can fail when allocating mbufs for descriptor rings */
	ret = igc_rx_init(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to initialize RX hardware");
		igc_dev_clear_queues(dev);
		return ret;
	}

	igc_clear_hw_cntrs_base_generic(hw);

	/* VLAN Offload Settings */
	eth_igc_vlan_offload_set(dev,
		ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK);

	/* Setup link speed and duplex */
	speeds = &dev->data->dev_conf.link_speeds;
	if (*speeds == ETH_LINK_SPEED_AUTONEG) {
		hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;
		hw->mac.autoneg = 1;
	} else {
		int num_speeds = 0;
		bool autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;

		/* Reset */
		hw->phy.autoneg_advertised = 0;

		if (*speeds & ~(ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
			ETH_LINK_SPEED_FIXED)) {
			num_speeds = -1;
			goto error_invalid_config;
		}
		if (*speeds & ETH_LINK_SPEED_10M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_10M) {
			hw->phy.autoneg_advertised |= ADVERTISE_10_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M_HD) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_HALF;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_100M) {
			hw->phy.autoneg_advertised |= ADVERTISE_100_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_1G) {
			hw->phy.autoneg_advertised |= ADVERTISE_1000_FULL;
			num_speeds++;
		}
		if (*speeds & ETH_LINK_SPEED_2_5G) {
			hw->phy.autoneg_advertised |= ADVERTISE_2500_FULL;
			num_speeds++;
		}
		if (num_speeds == 0 || (!autoneg && num_speeds > 1))
			goto error_invalid_config;

		/* Set/reset the mac.autoneg based on the link speed,
		 * fixed or not
		 */
		if (!autoneg) {
			hw->mac.autoneg = 0;
			hw->mac.forced_speed_duplex =
					hw->phy.autoneg_advertised;
		} else {
			hw->mac.autoneg = 1;
		}
	}

	igc_setup_link(hw);

	if (rte_intr_allow_others(intr_handle)) {
		/* check if lsc interrupt is enabled */
		if (dev->data->dev_conf.intr_conf.lsc)
			igc_lsc_interrupt_setup(dev, 1);
		else
			igc_lsc_interrupt_setup(dev, 0);
	} else {
		rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler,
				(void *)dev);
		if (dev->data->dev_conf.intr_conf.lsc)
			PMD_DRV_LOG(INFO,
				"LSC won't enable because of no intr multiplex");
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(intr_handle);

	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);

	/* check if rxq interrupt is enabled */
	if (dev->data->dev_conf.intr_conf.rxq &&
			rte_intr_dp_is_en(intr_handle))
		igc_rxq_interrupt_setup(dev);

	/* resume enabled intr since hw reset */
	igc_intr_other_enable(dev);

	eth_igc_rxtx_control(dev, true);
	eth_igc_link_update(dev, 0);

	return 0;

error_invalid_config:
	PMD_DRV_LOG(ERR, "Invalid advertised speeds (%u) for port %u",
			dev->data->dev_conf.link_speeds, dev->data->port_id);
	igc_dev_clear_queues(dev);
	return -EINVAL;
}
static int
igc_reset_swfw_lock(struct igc_hw *hw)
{
	int ret_val;

	/*
	 * Do mac ops initialization manually here, since we will need
	 * some function pointers set by this call.
	 */
	ret_val = igc_init_mac_params(hw);
	if (ret_val)
		return ret_val;

	/*
	 * SMBI lock should not fail in this early stage. If this is the case,
	 * it is due to an improper exit of the application.
	 * So force the release of the faulty lock.
	 */
	if (igc_get_hw_semaphore_generic(hw) < 0)
		PMD_DRV_LOG(DEBUG, "SMBI lock released");

	igc_put_hw_semaphore_generic(hw);

	if (hw->mac.ops.acquire_swfw_sync != NULL) {
		uint16_t mask;

		/*
		 * Phy lock should not fail in this early stage.
		 * If this is the case, it is due to an improper exit of the
		 * application. So force the release of the faulty lock.
		 */
		mask = IGC_SWFW_PHY0_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0) {
			PMD_DRV_LOG(DEBUG, "SWFW phy%d lock released",
					hw->bus.func);
		}
		hw->mac.ops.release_swfw_sync(hw, mask);

		/*
		 * This one is more tricky since it is common to all ports; but
		 * swfw_sync retries last long enough (1s) to be almost sure
		 * that if lock can not be taken it is due to an improper lock
		 * of the semaphore.
		 */
		mask = IGC_SWFW_EEP_SM;
		if (hw->mac.ops.acquire_swfw_sync(hw, mask) < 0)
			PMD_DRV_LOG(DEBUG, "SWFW common locks released");

		hw->mac.ops.release_swfw_sync(hw, mask);
	}

	return IGC_SUCCESS;
}
/*
 * free all rx/tx queues.
 */
static void
igc_dev_free_queues(struct rte_eth_dev *dev)
{
	uint16_t i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		eth_igc_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		eth_igc_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}
static void
eth_igc_close(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_adapter *adapter = IGC_DEV_PRIVATE(dev);
	int retry = 0;

	PMD_INIT_FUNC_TRACE();

	if (!adapter->stopped)
		eth_igc_stop(dev);

	igc_intr_other_disable(dev);
	do {
		int ret = rte_intr_callback_unregister(intr_handle,
				eth_igc_interrupt_handler, dev);
		if (ret >= 0 || ret == -ENOENT || ret == -EINVAL)
			break;

		PMD_DRV_LOG(ERR, "intr callback unregister failed: %d", ret);
		DELAY(200 * 1000); /* delay 200ms */
	} while (retry++ < 5);

	igc_phy_hw_reset(hw);
	igc_hw_control_release(hw);
	igc_dev_free_queues(dev);

	/* Reset any pending lock */
	igc_reset_swfw_lock(hw);
}
static void
igc_identify_hardware(struct rte_eth_dev *dev, struct rte_pci_device *pci_dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	hw->vendor_id = pci_dev->id.vendor_id;
	hw->device_id = pci_dev->id.device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
}
static int
eth_igc_dev_init(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	int i, error = 0;

	PMD_INIT_FUNC_TRACE();
	dev->dev_ops = &eth_igc_ops;

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work. Only check we don't need a different
	 * RX function.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(dev, pci_dev);

	hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;

	igc_identify_hardware(dev, pci_dev);
	if (igc_setup_init_funcs(hw, false) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	igc_get_bus_info(hw);

	/* Reset any pending lock */
	if (igc_reset_swfw_lock(hw) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	/* Finish initialization */
	if (igc_setup_init_funcs(hw, true) != IGC_SUCCESS) {
		error = -EIO;
		goto err_late;
	}

	hw->mac.autoneg = 1;
	hw->phy.autoneg_wait_to_complete = 0;
	hw->phy.autoneg_advertised = IGC_ALL_SPEED_DUPLEX_2500;

	/* Copper options */
	if (hw->phy.media_type == igc_media_type_copper) {
		hw->phy.mdix = 0; /* AUTO_ALL_MODES */
		hw->phy.disable_polarity_correction = 0;
		hw->phy.ms_type = igc_ms_hw_default;
	}

	/*
	 * Start from a known state, this is important in reading the nvm
	 * and mac from that.
	 */
	igc_reset_hw(hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (igc_validate_nvm_checksum(hw) < 0) {
		/*
		 * Some PCI-E parts fail the first check due to
		 * the link being in sleep state, call it again,
		 * if it fails a second time its a real issue.
		 */
		if (igc_validate_nvm_checksum(hw) < 0) {
			PMD_INIT_LOG(ERR, "EEPROM checksum invalid");
			error = -EIO;
			goto err_late;
		}
	}

	/* Read the permanent MAC address out of the EEPROM */
	if (igc_read_mac_addr(hw) != 0) {
		PMD_INIT_LOG(ERR, "EEPROM error while reading MAC address");
		error = -EIO;
		goto err_late;
	}

	/* Allocate memory for storing MAC addresses */
	dev->data->mac_addrs = rte_zmalloc("igc",
		RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate %d bytes for storing MAC",
				RTE_ETHER_ADDR_LEN * hw->mac.rar_entry_count);
		error = -ENOMEM;
		goto err_late;
	}

	/* Copy the permanent MAC address */
	rte_ether_addr_copy((struct rte_ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	/* Now initialize the hardware */
	if (igc_hardware_init(hw) != 0) {
		PMD_INIT_LOG(ERR, "Hardware initialization failed");
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
		error = -ENODEV;
		goto err_late;
	}

	/* Pass the information to the rte_eth_dev_close() that it should also
	 * release the private port resources.
	 */
	dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	hw->mac.get_link_status = 1;
	igc->stopped = 0;

	/* Indicate SOL/IDER usage */
	if (igc_check_reset_block(hw) < 0)
		PMD_INIT_LOG(ERR,
			"PHY reset is blocked due to SOL/IDER session.");

	PMD_INIT_LOG(DEBUG, "port_id %d vendorID=0x%x deviceID=0x%x",
			dev->data->port_id, pci_dev->id.vendor_id,
			pci_dev->id.device_id);

	rte_intr_callback_register(&pci_dev->intr_handle,
			eth_igc_interrupt_handler, (void *)dev);

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pci_dev->intr_handle);

	/* enable support intr */
	igc_intr_other_enable(dev);

	/* initiate queue status */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		igc->txq_stats_map[i] = -1;
		igc->rxq_stats_map[i] = -1;
	}

	return 0;

err_late:
	igc_hw_control_release(hw);
	return error;
}
static int
eth_igc_dev_uninit(__rte_unused struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	eth_igc_close(eth_dev);
	return 0;
}
static int
eth_igc_reset(struct rte_eth_dev *dev)
{
	int ret;

	PMD_INIT_FUNC_TRACE();

	ret = eth_igc_dev_uninit(dev);
	if (ret)
		return ret;

	return eth_igc_dev_init(dev);
}
static int
eth_igc_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_UPE);
	if (dev->data->all_multicast == 1)
		rctl |= IGC_RCTL_MPE;
	else
		rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl |= IGC_RCTL_MPE;
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}

static int
eth_igc_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rctl;

	if (dev->data->promiscuous == 1)
		return 0;	/* must remain in all_multicast mode */

	rctl = IGC_READ_REG(hw, IGC_RCTL);
	rctl &= (~IGC_RCTL_MPE);
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);
	return 0;
}
static int
eth_igc_fw_version_get(struct rte_eth_dev *dev, char *fw_version,
		size_t fw_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_fw_version fw;
	int ret;

	igc_get_fw_version(hw, &fw);

	/* if option rom is valid, display its version too */
	if (fw.or_valid) {
		ret = snprintf(fw_version, fw_size,
			 "%d.%d, 0x%08x, %d.%d.%d",
			 fw.eep_major, fw.eep_minor, fw.etrack_id,
			 fw.or_major, fw.or_build, fw.or_patch);
	/* no option rom */
	} else {
		if (fw.etrack_id != 0x0000) {
			ret = snprintf(fw_version, fw_size,
				 "%d.%d, 0x%08x",
				 fw.eep_major, fw.eep_minor,
				 fw.etrack_id);
		} else {
			ret = snprintf(fw_version, fw_size,
				 "%d.%d.%d",
				 fw.eep_major, fw.eep_minor,
				 fw.eep_build);
		}
	}

	ret += 1; /* add the size of '\0' */
	if (fw_size < (u32)ret)
		return ret;
	else
		return 0;
}
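/*
 * Illustrative output (values made up): "1.52, 0x800004d2, 1.0.0" when an
 * option ROM is present, "1.52, 0x800004d2" when only an etrack id is
 * reported, and "1.52.0" otherwise.
 */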
static int
eth_igc_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	dev_info->min_rx_bufsize = 256; /* See BSIZE field of RCTL register. */
	dev_info->max_rx_pktlen = MAX_RX_JUMBO_FRAME_SIZE;
	dev_info->max_mac_addrs = hw->mac.rar_entry_count;
	dev_info->rx_offload_capa = IGC_RX_OFFLOAD_ALL;
	dev_info->tx_offload_capa = IGC_TX_OFFLOAD_ALL;
	dev_info->rx_queue_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

	dev_info->max_rx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_tx_queues = IGC_QUEUE_PAIRS_NUM;
	dev_info->max_vmdq_pools = 0;

	dev_info->hash_key_size = IGC_HKEY_MAX_INDEX * sizeof(uint32_t);
	dev_info->reta_size = ETH_RSS_RETA_SIZE_128;
	dev_info->flow_type_rss_offloads = IGC_RSS_OFFLOAD_ALL;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = IGC_DEFAULT_RX_PTHRESH,
			.hthresh = IGC_DEFAULT_RX_HTHRESH,
			.wthresh = IGC_DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = IGC_DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
		.offloads = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = IGC_DEFAULT_TX_PTHRESH,
			.hthresh = IGC_DEFAULT_TX_HTHRESH,
			.wthresh = IGC_DEFAULT_TX_WTHRESH,
		},
		.offloads = 0,
	};

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
			ETH_LINK_SPEED_100M_HD | ETH_LINK_SPEED_100M |
			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G;

	dev_info->max_mtu = dev_info->max_rx_pktlen - IGC_ETH_OVERHEAD;
	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	return 0;
}
static int
eth_igc_led_on(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_on(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}

static int
eth_igc_led_off(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	return igc_led_off(hw) == IGC_SUCCESS ? 0 : -ENOTSUP;
}
static const uint32_t *
eth_igc_supported_ptypes_get(__rte_unused struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to rx_desc_pkt_info_to_pkt_type() */
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_SCTP,
		RTE_PTYPE_TUNNEL_IP,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}
static int
eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t frame_size = mtu + IGC_ETH_OVERHEAD;
	uint32_t rctl;

	/* if extend vlan has been enabled */
	if (IGC_READ_REG(hw, IGC_CTRL_EXT) & IGC_CTRL_EXT_EXT_VLAN)
		frame_size += VLAN_TAG_SIZE;

	/* check that mtu is within the allowed range */
	if (mtu < RTE_ETHER_MIN_MTU ||
		frame_size > MAX_RX_JUMBO_FRAME_SIZE)
		return -EINVAL;

	/*
	 * refuse mtu that requires the support of scattered packets when
	 * this feature has not been enabled before.
	 */
	if (!dev->data->scattered_rx &&
		frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
		return -EINVAL;

	rctl = IGC_READ_REG(hw, IGC_RCTL);

	/* switch to jumbo mode if needed */
	if (mtu > RTE_ETHER_MTU) {
		dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl |= IGC_RCTL_LPE;
	} else {
		dev->data->dev_conf.rxmode.offloads &=
			~DEV_RX_OFFLOAD_JUMBO_FRAME;
		rctl &= ~IGC_RCTL_LPE;
	}
	IGC_WRITE_REG(hw, IGC_RCTL, rctl);

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;

	IGC_WRITE_REG(hw, IGC_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

	return 0;
}
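/*
 * Worked example: mtu = 9000 gives frame_size = 9022 (9026 if external
 * VLAN is enabled). Since 9000 > RTE_ETHER_MTU, jumbo mode and the
 * long-packet-enable (LPE) bit are switched on and RLPML is raised to the
 * new max frame size.
 */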
static int
eth_igc_rar_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
		uint32_t index, uint32_t pool)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	igc_rar_set(hw, mac_addr->addr_bytes, index);
	RTE_SET_USED(pool);
	return 0;
}

static void
eth_igc_rar_clear(struct rte_eth_dev *dev, uint32_t index)
{
	uint8_t addr[RTE_ETHER_ADDR_LEN];
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);

	memset(addr, 0, sizeof(addr));
	igc_rar_set(hw, addr, index);
}

static int
eth_igc_default_mac_addr_set(struct rte_eth_dev *dev,
		struct rte_ether_addr *addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_rar_set(hw, addr->addr_bytes, 0);
	return 0;
}

static int
eth_igc_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set,
		uint32_t nb_mc_addr)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_update_mc_addr_list(hw, (u8 *)mc_addr_set, nb_mc_addr);
	return 0;
}
/*
 * Read hardware registers
 */
static void
igc_read_stats_registers(struct igc_hw *hw, struct igc_hw_stats *stats)
{
	int pause_frames;

	uint64_t old_gprc = stats->gprc;
	uint64_t old_gptc = stats->gptc;
	uint64_t old_tpr = stats->tpr;
	uint64_t old_tpt = stats->tpt;
	uint64_t old_rpthc = stats->rpthc;
	uint64_t old_hgptc = stats->hgptc;

	stats->crcerrs += IGC_READ_REG(hw, IGC_CRCERRS);
	stats->algnerrc += IGC_READ_REG(hw, IGC_ALGNERRC);
	stats->rxerrc += IGC_READ_REG(hw, IGC_RXERRC);
	stats->mpc += IGC_READ_REG(hw, IGC_MPC);
	stats->scc += IGC_READ_REG(hw, IGC_SCC);
	stats->ecol += IGC_READ_REG(hw, IGC_ECOL);

	stats->mcc += IGC_READ_REG(hw, IGC_MCC);
	stats->latecol += IGC_READ_REG(hw, IGC_LATECOL);
	stats->colc += IGC_READ_REG(hw, IGC_COLC);

	stats->dc += IGC_READ_REG(hw, IGC_DC);
	stats->tncrs += IGC_READ_REG(hw, IGC_TNCRS);
	stats->htdpmc += IGC_READ_REG(hw, IGC_HTDPMC);
	stats->rlec += IGC_READ_REG(hw, IGC_RLEC);
	stats->xonrxc += IGC_READ_REG(hw, IGC_XONRXC);
	stats->xontxc += IGC_READ_REG(hw, IGC_XONTXC);

	/*
	 * For watchdog management we need to know if we have been
	 * paused during the last interval, so capture that here.
	 */
	pause_frames = IGC_READ_REG(hw, IGC_XOFFRXC);
	stats->xoffrxc += pause_frames;
	stats->xofftxc += IGC_READ_REG(hw, IGC_XOFFTXC);
	stats->fcruc += IGC_READ_REG(hw, IGC_FCRUC);
	stats->prc64 += IGC_READ_REG(hw, IGC_PRC64);
	stats->prc127 += IGC_READ_REG(hw, IGC_PRC127);
	stats->prc255 += IGC_READ_REG(hw, IGC_PRC255);
	stats->prc511 += IGC_READ_REG(hw, IGC_PRC511);
	stats->prc1023 += IGC_READ_REG(hw, IGC_PRC1023);
	stats->prc1522 += IGC_READ_REG(hw, IGC_PRC1522);
	stats->gprc += IGC_READ_REG(hw, IGC_GPRC);
	stats->bprc += IGC_READ_REG(hw, IGC_BPRC);
	stats->mprc += IGC_READ_REG(hw, IGC_MPRC);
	stats->gptc += IGC_READ_REG(hw, IGC_GPTC);

	/* For the 64-bit byte counters the low dword must be read first. */
	/* Both registers clear on the read of the high dword */

	/* Workaround CRC bytes included in size, take away 4 bytes/packet */
	stats->gorc += IGC_READ_REG(hw, IGC_GORCL);
	stats->gorc += ((uint64_t)IGC_READ_REG(hw, IGC_GORCH) << 32);
	stats->gorc -= (stats->gprc - old_gprc) * RTE_ETHER_CRC_LEN;
	stats->gotc += IGC_READ_REG(hw, IGC_GOTCL);
	stats->gotc += ((uint64_t)IGC_READ_REG(hw, IGC_GOTCH) << 32);
	stats->gotc -= (stats->gptc - old_gptc) * RTE_ETHER_CRC_LEN;

	stats->rnbc += IGC_READ_REG(hw, IGC_RNBC);
	stats->ruc += IGC_READ_REG(hw, IGC_RUC);
	stats->rfc += IGC_READ_REG(hw, IGC_RFC);
	stats->roc += IGC_READ_REG(hw, IGC_ROC);
	stats->rjc += IGC_READ_REG(hw, IGC_RJC);

	stats->mgprc += IGC_READ_REG(hw, IGC_MGTPRC);
	stats->mgpdc += IGC_READ_REG(hw, IGC_MGTPDC);
	stats->mgptc += IGC_READ_REG(hw, IGC_MGTPTC);
	stats->b2ospc += IGC_READ_REG(hw, IGC_B2OSPC);
	stats->b2ogprc += IGC_READ_REG(hw, IGC_B2OGPRC);
	stats->o2bgptc += IGC_READ_REG(hw, IGC_O2BGPTC);
	stats->o2bspc += IGC_READ_REG(hw, IGC_O2BSPC);

	stats->tpr += IGC_READ_REG(hw, IGC_TPR);
	stats->tpt += IGC_READ_REG(hw, IGC_TPT);

	stats->tor += IGC_READ_REG(hw, IGC_TORL);
	stats->tor += ((uint64_t)IGC_READ_REG(hw, IGC_TORH) << 32);
	stats->tor -= (stats->tpr - old_tpr) * RTE_ETHER_CRC_LEN;
	stats->tot += IGC_READ_REG(hw, IGC_TOTL);
	stats->tot += ((uint64_t)IGC_READ_REG(hw, IGC_TOTH) << 32);
	stats->tot -= (stats->tpt - old_tpt) * RTE_ETHER_CRC_LEN;

	stats->ptc64 += IGC_READ_REG(hw, IGC_PTC64);
	stats->ptc127 += IGC_READ_REG(hw, IGC_PTC127);
	stats->ptc255 += IGC_READ_REG(hw, IGC_PTC255);
	stats->ptc511 += IGC_READ_REG(hw, IGC_PTC511);
	stats->ptc1023 += IGC_READ_REG(hw, IGC_PTC1023);
	stats->ptc1522 += IGC_READ_REG(hw, IGC_PTC1522);
	stats->mptc += IGC_READ_REG(hw, IGC_MPTC);
	stats->bptc += IGC_READ_REG(hw, IGC_BPTC);
	stats->tsctc += IGC_READ_REG(hw, IGC_TSCTC);

	stats->iac += IGC_READ_REG(hw, IGC_IAC);
	stats->rpthc += IGC_READ_REG(hw, IGC_RPTHC);
	stats->hgptc += IGC_READ_REG(hw, IGC_HGPTC);
	stats->icrxdmtc += IGC_READ_REG(hw, IGC_ICRXDMTC);

	/* Host to Card Statistics */
	stats->hgorc += IGC_READ_REG(hw, IGC_HGORCL);
	stats->hgorc += ((uint64_t)IGC_READ_REG(hw, IGC_HGORCH) << 32);
	stats->hgorc -= (stats->rpthc - old_rpthc) * RTE_ETHER_CRC_LEN;
	stats->hgotc += IGC_READ_REG(hw, IGC_HGOTCL);
	stats->hgotc += ((uint64_t)IGC_READ_REG(hw, IGC_HGOTCH) << 32);
	stats->hgotc -= (stats->hgptc - old_hgptc) * RTE_ETHER_CRC_LEN;
	stats->lenerrs += IGC_READ_REG(hw, IGC_LENERRS);
}
/*
 * Write 0 to all queue status registers
 */
static void
igc_reset_queue_stats_register(struct igc_hw *hw)
{
	int i;

	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		IGC_WRITE_REG(hw, IGC_PQGPRC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGPTC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGORC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQGOTC(i), 0);
		IGC_WRITE_REG(hw, IGC_PQMPRC(i), 0);
		IGC_WRITE_REG(hw, IGC_RQDPC(i), 0);
		IGC_WRITE_REG(hw, IGC_TQDPC(i), 0);
	}
}
1768 igc_read_queue_stats_register(struct rte_eth_dev *dev)
1770 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1771 struct igc_hw_queue_stats *queue_stats =
1772 IGC_DEV_PRIVATE_QUEUE_STATS(dev);
1776 * This register is not cleared on read. Furthermore, the register wraps
1777 * around back to 0x00000000 on the next increment when reaching a value
1778 * of 0xFFFFFFFF and then continues normal count operation.
1780 for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
1788 * Read the register first, if the value is smaller than that
1789 * previous read, that mean the register has been overflowed,
1790 * then we add the high 4 bytes by 1 and replace the low 4
1791 * bytes by the new value.
1793 tmp = IGC_READ_REG(hw, IGC_PQGPRC(i));
1794 value.ddword = queue_stats->pqgprc[i];
1795 if (value.dword[U32_0_IN_U64] > tmp)
1796 value.dword[U32_1_IN_U64]++;
1797 value.dword[U32_0_IN_U64] = tmp;
1798 queue_stats->pqgprc[i] = value.ddword;
1800 tmp = IGC_READ_REG(hw, IGC_PQGPTC(i));
1801 value.ddword = queue_stats->pqgptc[i];
1802 if (value.dword[U32_0_IN_U64] > tmp)
1803 value.dword[U32_1_IN_U64]++;
1804 value.dword[U32_0_IN_U64] = tmp;
1805 queue_stats->pqgptc[i] = value.ddword;
1807 tmp = IGC_READ_REG(hw, IGC_PQGORC(i));
1808 value.ddword = queue_stats->pqgorc[i];
1809 if (value.dword[U32_0_IN_U64] > tmp)
1810 value.dword[U32_1_IN_U64]++;
1811 value.dword[U32_0_IN_U64] = tmp;
1812 queue_stats->pqgorc[i] = value.ddword;
1814 tmp = IGC_READ_REG(hw, IGC_PQGOTC(i));
1815 value.ddword = queue_stats->pqgotc[i];
1816 if (value.dword[U32_0_IN_U64] > tmp)
1817 value.dword[U32_1_IN_U64]++;
1818 value.dword[U32_0_IN_U64] = tmp;
1819 queue_stats->pqgotc[i] = value.ddword;
1821 tmp = IGC_READ_REG(hw, IGC_PQMPRC(i));
1822 value.ddword = queue_stats->pqmprc[i];
1823 if (value.dword[U32_0_IN_U64] > tmp)
1824 value.dword[U32_1_IN_U64]++;
1825 value.dword[U32_0_IN_U64] = tmp;
1826 queue_stats->pqmprc[i] = value.ddword;
1828 tmp = IGC_READ_REG(hw, IGC_RQDPC(i));
1829 value.ddword = queue_stats->rqdpc[i];
1830 if (value.dword[U32_0_IN_U64] > tmp)
1831 value.dword[U32_1_IN_U64]++;
1832 value.dword[U32_0_IN_U64] = tmp;
1833 queue_stats->rqdpc[i] = value.ddword;
1835 tmp = IGC_READ_REG(hw, IGC_TQDPC(i));
1836 value.ddword = queue_stats->tqdpc[i];
1837 if (value.dword[U32_0_IN_U64] > tmp)
1838 value.dword[U32_1_IN_U64]++;
1839 value.dword[U32_0_IN_U64] = tmp;
1840 queue_stats->tqdpc[i] = value.ddword;
static int
eth_igc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats)
{
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *stats = IGC_DEV_PRIVATE_STATS(dev);
	struct igc_hw_queue_stats *queue_stats =
			IGC_DEV_PRIVATE_QUEUE_STATS(dev);
	int i;

	/*
	 * Cancel status handler since it will read the queue status registers
	 */
	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* Read status register */
	igc_read_queue_stats_register(dev);
	igc_read_stats_registers(hw, stats);

	if (rte_stats == NULL) {
		/* Restart queue status handler */
		rte_eal_alarm_set(IGC_ALARM_INTERVAL,
				igc_update_queue_stats_handler, dev);
		return -EINVAL;
	}

	/* Rx Errors */
	rte_stats->imissed = stats->mpc;
	rte_stats->ierrors = stats->crcerrs +
			stats->rlec + stats->ruc + stats->roc +
			stats->rxerrc + stats->algnerrc;

	/* Tx Errors */
	rte_stats->oerrors = stats->ecol + stats->latecol;

	rte_stats->ipackets = stats->gprc;
	rte_stats->opackets = stats->gptc;
	rte_stats->ibytes = stats->gorc;
	rte_stats->obytes = stats->gotc;

	/* Get per-queue statuses */
	for (i = 0; i < IGC_QUEUE_PAIRS_NUM; i++) {
		/* GET TX queue statuses */
		int map_id = igc->txq_stats_map[i];
		if (map_id >= 0) {
			rte_stats->q_opackets[map_id] += queue_stats->pqgptc[i];
			rte_stats->q_obytes[map_id] += queue_stats->pqgotc[i];
		}
		/* Get RX queue statuses */
		map_id = igc->rxq_stats_map[i];
		if (map_id >= 0) {
			rte_stats->q_ipackets[map_id] += queue_stats->pqgprc[i];
			rte_stats->q_ibytes[map_id] += queue_stats->pqgorc[i];
			rte_stats->q_errors[map_id] += queue_stats->rqdpc[i];
		}
	}

	/* Restart queue status handler */
	rte_eal_alarm_set(IGC_ALARM_INTERVAL,
			igc_update_queue_stats_handler, dev);
	return 0;
}
static int
eth_igc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		unsigned int n)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats =
			IGC_DEV_PRIVATE_STATS(dev);
	unsigned int i;

	igc_read_stats_registers(hw, hw_stats);

	if (n < IGC_NB_XSTATS)
		return IGC_NB_XSTATS;

	/* If this is a reset xstats is NULL, and we have cleared the
	 * registers by reading them.
	 */
	if (!xstats)
		return 0;

	/* Extended stats */
	for (i = 0; i < IGC_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
			rte_igc_stats_strings[i].offset);
	}

	return IGC_NB_XSTATS;
}
static int
eth_igc_xstats_reset(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
	struct igc_hw_queue_stats *queue_stats =
			IGC_DEV_PRIVATE_QUEUE_STATS(dev);

	/* Cancel queue status handler to avoid conflicts */
	rte_eal_alarm_cancel(igc_update_queue_stats_handler, dev);

	/* HW registers are cleared on read */
	igc_reset_queue_stats_register(hw);
	igc_read_stats_registers(hw, hw_stats);

	/* Reset software totals */
	memset(hw_stats, 0, sizeof(*hw_stats));
	memset(queue_stats, 0, sizeof(*queue_stats));

	/* Restart the queue status handler */
	rte_eal_alarm_set(IGC_ALARM_INTERVAL, igc_update_queue_stats_handler,
			dev);

	return 0;
}
static int
eth_igc_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, unsigned int size)
{
	unsigned int i;

	if (xstats_names == NULL)
		return IGC_NB_XSTATS;

	if (size < IGC_NB_XSTATS) {
		PMD_DRV_LOG(ERR, "not enough buffers!");
		return IGC_NB_XSTATS;
	}

	for (i = 0; i < IGC_NB_XSTATS; i++)
		strlcpy(xstats_names[i].name, rte_igc_stats_strings[i].name,
			sizeof(xstats_names[i].name));

	return IGC_NB_XSTATS;
}
static int
eth_igc_xstats_get_names_by_id(struct rte_eth_dev *dev,
	struct rte_eth_xstat_name *xstats_names, const uint64_t *ids,
	unsigned int limit)
{
	unsigned int i;

	if (!ids)
		return eth_igc_xstats_get_names(dev, xstats_names, limit);

	for (i = 0; i < limit; i++) {
		if (ids[i] >= IGC_NB_XSTATS) {
			PMD_DRV_LOG(ERR, "id value isn't valid");
			return -EINVAL;
		}
		strlcpy(xstats_names[i].name,
			rte_igc_stats_strings[ids[i]].name,
			sizeof(xstats_names[i].name));
	}
	return limit;
}
static int
eth_igc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		uint64_t *values, unsigned int n)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_hw_stats *hw_stats = IGC_DEV_PRIVATE_STATS(dev);
	unsigned int i;

	igc_read_stats_registers(hw, hw_stats);

	if (!ids) {
		if (n < IGC_NB_XSTATS)
			return IGC_NB_XSTATS;

		/* If this is a reset xstats is NULL, and we have cleared the
		 * registers by reading them.
		 */
		if (!values)
			return 0;

		/* Extended stats */
		for (i = 0; i < IGC_NB_XSTATS; i++)
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igc_stats_strings[i].offset);

		return IGC_NB_XSTATS;
	} else {
		for (i = 0; i < n; i++) {
			if (ids[i] >= IGC_NB_XSTATS) {
				PMD_DRV_LOG(ERR, "id value isn't valid");
				return -EINVAL;
			}
			values[i] = *(uint64_t *)(((char *)hw_stats) +
					rte_igc_stats_strings[ids[i]].offset);
		}
		return n;
	}
}
static int
eth_igc_queue_stats_mapping_set(struct rte_eth_dev *dev,
	uint16_t queue_id, uint8_t stat_idx, uint8_t is_rx)
{
	struct igc_adapter *igc = IGC_DEV_PRIVATE(dev);

	/* check queue id is valid */
	if (queue_id >= IGC_QUEUE_PAIRS_NUM) {
		PMD_DRV_LOG(ERR, "queue id(%u) error, max is %u",
			queue_id, IGC_QUEUE_PAIRS_NUM - 1);
		return -EINVAL;
	}

	/* store the mapping status id */
	if (is_rx)
		igc->rxq_stats_map[queue_id] = stat_idx;
	else
		igc->txq_stats_map[queue_id] = stat_idx;

	return 0;
}
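/*
 * Illustrative usage (not from the original driver): mapping Rx queue 0 to
 * stats index 5 makes eth_igc_stats_get() accumulate that queue's counters
 * into rte_stats->q_ipackets[5]/q_ibytes[5]:
 *
 *	eth_igc_queue_stats_mapping_set(dev, 0, 5, 1);
 */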
static int
eth_igc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMC, mask);
	IGC_WRITE_FLUSH(hw);

	return 0;
}
static int
eth_igc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t vec = IGC_MISC_VEC_ID;

	if (rte_intr_allow_others(intr_handle))
		vec = IGC_RX_VEC_START;

	uint32_t mask = 1u << (queue_id + vec);

	IGC_WRITE_REG(hw, IGC_EIMS, mask);
	IGC_WRITE_FLUSH(hw);

	rte_intr_enable(intr_handle);

	return 0;
}
static int
eth_igc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl;
	int rx_pause, tx_pause;

	fc_conf->pause_time = hw->fc.pause_time;
	fc_conf->high_water = hw->fc.high_water;
	fc_conf->low_water = hw->fc.low_water;
	fc_conf->send_xon = hw->fc.send_xon;
	fc_conf->autoneg = hw->mac.autoneg;

	/*
	 * Return rx_pause and tx_pause status according to the actual setting
	 * of the TFCE and RFCE bits in the CTRL register.
	 */
	ctrl = IGC_READ_REG(hw, IGC_CTRL);
	if (ctrl & IGC_CTRL_TFCE)
		tx_pause = 1;
	else
		tx_pause = 0;

	if (ctrl & IGC_CTRL_RFCE)
		rx_pause = 1;
	else
		rx_pause = 0;

	if (rx_pause && tx_pause)
		fc_conf->mode = RTE_FC_FULL;
	else if (rx_pause)
		fc_conf->mode = RTE_FC_RX_PAUSE;
	else if (tx_pause)
		fc_conf->mode = RTE_FC_TX_PAUSE;
	else
		fc_conf->mode = RTE_FC_NONE;

	return 0;
}
static int
eth_igc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t rx_buf_size;
	uint32_t max_high_water;
	uint32_t rctl;
	int err;

	if (fc_conf->autoneg != hw->mac.autoneg)
		return -ENOTSUP;

	rx_buf_size = igc_get_rx_buffer_size(hw);
	PMD_DRV_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);

	/* At least reserve one Ethernet frame for watermark */
	max_high_water = rx_buf_size - RTE_ETHER_MAX_LEN;
	if (fc_conf->high_water > max_high_water ||
		fc_conf->high_water < fc_conf->low_water) {
		PMD_DRV_LOG(ERR,
			"Incorrect high(%u)/low(%u) water value, max is %u",
			fc_conf->high_water, fc_conf->low_water,
			max_high_water);
		return -EINVAL;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		hw->fc.requested_mode = igc_fc_none;
		break;
	case RTE_FC_RX_PAUSE:
		hw->fc.requested_mode = igc_fc_rx_pause;
		break;
	case RTE_FC_TX_PAUSE:
		hw->fc.requested_mode = igc_fc_tx_pause;
		break;
	case RTE_FC_FULL:
		hw->fc.requested_mode = igc_fc_full;
		break;
	default:
		PMD_DRV_LOG(ERR, "unsupported fc mode: %u", fc_conf->mode);
		return -EINVAL;
	}

	hw->fc.pause_time = fc_conf->pause_time;
	hw->fc.high_water = fc_conf->high_water;
	hw->fc.low_water = fc_conf->low_water;
	hw->fc.send_xon = fc_conf->send_xon;

	err = igc_setup_link_generic(hw);
	if (err == IGC_SUCCESS) {
		/*
		 * check if we want to forward MAC frames - driver doesn't have
		 * native capability to do that, so we'll write the registers
		 * ourselves
		 */
		rctl = IGC_READ_REG(hw, IGC_RCTL);

		/* set or clear MFLCN.PMCF bit depending on configuration */
		if (fc_conf->mac_ctrl_frame_fwd != 0)
			rctl |= IGC_RCTL_PMCF;
		else
			rctl &= ~IGC_RCTL_PMCF;

		IGC_WRITE_REG(hw, IGC_RCTL, rctl);
		IGC_WRITE_FLUSH(hw);
		return 0;
	}

	PMD_DRV_LOG(ERR, "igc_setup_link_generic = 0x%x", err);
	return -EIO;
}
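
/*
 * Illustrative sketch (compiled out, not part of the driver): enabling
 * full (Rx+Tx) flow control through the generic API. Reading the current
 * settings first keeps the watermarks within the high/low checks in
 * eth_igc_flow_ctrl_set() above.
 */
#ifdef IGC_USAGE_EXAMPLE
static int
example_enable_flow_ctrl(uint16_t port_id)
{
	struct rte_eth_fc_conf fc_conf;

	memset(&fc_conf, 0, sizeof(fc_conf));
	rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
	fc_conf.mode = RTE_FC_FULL;
	fc_conf.pause_time = IGC_FC_PAUSE_TIME;
	return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}
#endif /* IGC_USAGE_EXAMPLE */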
static int
eth_igc_rss_reta_update(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured (%d) doesn't match the number supported by hardware (%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	/* set redirection table, 4 entries per RETA register */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta, reg;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* if no need to update the register */
		if (!mask)
			continue;

		/* check mask whether need to read the register value first */
		if (mask == IGC_RSS_RDT_REG_SIZE_MASK)
			reg.dword = 0;
		else
			reg.dword = IGC_READ_REG_LE_VALUE(hw,
					IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));

		/* update the register */
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta.bytes[j] =
				(uint8_t)reta_conf[idx].reta[shift + j];
			else
				reta.bytes[j] = reg.bytes[j];
		}
		IGC_WRITE_REG_LE_VALUE(hw,
			IGC_RETA(i / IGC_RSS_RDT_REG_SIZE), reta.dword);
	}

	return 0;
}
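
/*
 * Illustrative sketch (compiled out, not part of the driver): spreading
 * all 128 RETA entries round-robin over "nb_queues" Rx queues through
 * the generic API, which dispatches to eth_igc_rss_reta_update() above.
 * "nb_queues" is assumed to be at least 1.
 */
#ifdef IGC_USAGE_EXAMPLE
static int
example_spread_reta(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[2];	/* 128 / 64 groups */
	uint16_t i;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i++) {
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask |=
			1ULL << (i % RTE_RETA_GROUP_SIZE);
		reta_conf[i / RTE_RETA_GROUP_SIZE].reta[i %
			RTE_RETA_GROUP_SIZE] = i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf,
			ETH_RSS_RETA_SIZE_128);
}
#endif /* IGC_USAGE_EXAMPLE */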
static int
eth_igc_rss_reta_query(struct rte_eth_dev *dev,
		struct rte_eth_rss_reta_entry64 *reta_conf,
		uint16_t reta_size)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint16_t i;

	if (reta_size != ETH_RSS_RETA_SIZE_128) {
		PMD_DRV_LOG(ERR,
			"The size of the RSS redirection table configured (%d) doesn't match the number supported by hardware (%d)",
			reta_size, ETH_RSS_RETA_SIZE_128);
		return -EINVAL;
	}

	/* read redirection table */
	for (i = 0; i < ETH_RSS_RETA_SIZE_128; i += IGC_RSS_RDT_REG_SIZE) {
		union igc_rss_reta_reg reta;
		uint16_t idx, shift;
		uint8_t j, mask;

		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) &
				IGC_RSS_RDT_REG_SIZE_MASK);

		/* if no need to read register */
		if (!mask)
			continue;

		/* read register and get the queue index */
		reta.dword = IGC_READ_REG_LE_VALUE(hw,
				IGC_RETA(i / IGC_RSS_RDT_REG_SIZE));
		for (j = 0; j < IGC_RSS_RDT_REG_SIZE; j++) {
			if (mask & (1u << j))
				reta_conf[idx].reta[shift + j] = reta.bytes[j];
		}
	}

	return 0;
}
static int
eth_igc_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_hw_rss_hash_set(hw, rss_conf);
	return 0;
}
static int
eth_igc_rss_hash_conf_get(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t rss_hf;

	if (hash_key != NULL) {
		int i;

		/* if not enough space to store the hash key */
		if (rss_conf->rss_key_len != IGC_HKEY_SIZE) {
			PMD_DRV_LOG(ERR,
				"RSS hash key size %u in parameter doesn't match the hardware hash key size %u",
				rss_conf->rss_key_len, IGC_HKEY_SIZE);
			return -EINVAL;
		}

		/* read RSS key from register */
		for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
			hash_key[i] = IGC_READ_REG_LE_VALUE(hw, IGC_RSSRK(i));
	}

	/* get RSS functions configured in MRQC register */
	mrqc = IGC_READ_REG(hw, IGC_MRQC);
	if ((mrqc & IGC_MRQC_ENABLE_RSS_4Q) == 0)
		return 0;

	rss_hf = 0;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4)
		rss_hf |= ETH_RSS_IPV4;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6)
		rss_hf |= ETH_RSS_IPV6;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_EX)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_TCP_EX)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV4_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
	if (mrqc & IGC_MRQC_RSS_FIELD_IPV6_UDP_EX)
		rss_hf |= ETH_RSS_IPV6_UDP_EX;

	rss_conf->rss_hf |= rss_hf;
	return 0;
}
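
/*
 * Illustrative sketch (compiled out, not part of the driver): querying
 * the active RSS hash protocols and key;
 * rte_eth_dev_rss_hash_conf_get() lands in the callback above.
 */
#ifdef IGC_USAGE_EXAMPLE
static void
example_query_rss(uint16_t port_id)
{
	uint8_t key[IGC_HKEY_SIZE];
	struct rte_eth_rss_conf conf = {
		.rss_key = key,
		.rss_key_len = IGC_HKEY_SIZE,
	};

	if (rte_eth_dev_rss_hash_conf_get(port_id, &conf) == 0)
		printf("rss_hf=0x%" PRIx64 "\n", conf.rss_hf);
}
#endif /* IGC_USAGE_EXAMPLE */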
static int
eth_igc_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t vfta, vid_idx, vid_bit;

	vid_idx = (vlan_id >> IGC_VFTA_ENTRY_SHIFT) & IGC_VFTA_ENTRY_MASK;
	vid_bit = 1u << (vlan_id & IGC_VFTA_ENTRY_BIT_SHIFT_MASK);
	vfta = shadow_vfta->vfta[vid_idx];
	if (on)
		vfta |= vid_bit;
	else
		vfta &= ~vid_bit;
	IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, vid_idx, vfta);

	/* update local VFTA copy */
	shadow_vfta->vfta[vid_idx] = vfta;
	return 0;
}
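
/*
 * Illustrative sketch (compiled out, not part of the driver): admitting
 * VLAN 100 through the hardware filter; rte_eth_dev_vlan_filter()
 * resolves to eth_igc_vlan_filter_set() (the VLAN filtering offload
 * must be enabled for the filter to take effect).
 */
#ifdef IGC_USAGE_EXAMPLE
static int
example_allow_vlan_100(uint16_t port_id)
{
	return rte_eth_dev_vlan_filter(port_id, 100, 1 /* on */);
}
#endif /* IGC_USAGE_EXAMPLE */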
static void
igc_vlan_hw_filter_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_read_reg_check_clear_bits(hw, IGC_RCTL,
			IGC_RCTL_CFIEN | IGC_RCTL_VFE);
}
static void
igc_vlan_hw_filter_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	struct igc_vfta *shadow_vfta = IGC_DEV_PRIVATE_VFTA(dev);
	uint32_t reg_val;
	int i;

	/* Filter Table Enable, CFI not used for packet acceptance */
	reg_val = IGC_READ_REG(hw, IGC_RCTL);
	reg_val &= ~IGC_RCTL_CFIEN;
	reg_val |= IGC_RCTL_VFE;
	IGC_WRITE_REG(hw, IGC_RCTL, reg_val);

	/* restore VFTA table */
	for (i = 0; i < IGC_VFTA_SIZE; i++)
		IGC_WRITE_REG_ARRAY(hw, IGC_VFTA, i, shadow_vfta->vfta[i]);
}
static void
igc_vlan_hw_strip_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_read_reg_check_clear_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}
static void
igc_vlan_hw_strip_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	igc_read_reg_check_set_bits(hw, IGC_CTRL, IGC_CTRL_VME);
}
static int
igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* if extend vlan hasn't been enabled */
	if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
		return 0;

	if ((dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
		goto write_ext_vlan;

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len <
			RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
			dev->data->dev_conf.rxmode.max_rx_pkt_len,
			VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
		return -EINVAL;
	}
	dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE;
	IGC_WRITE_REG(hw, IGC_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

write_ext_vlan:
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}
static int
igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t ctrl_ext;

	ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);

	/* if extend vlan has been enabled */
	if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
		return 0;

	if ((dev->data->dev_conf.rxmode.offloads &
			DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
		goto write_ext_vlan;

	/* Update maximum packet length */
	if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
			MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) {
		PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
			VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE;
	IGC_WRITE_REG(hw, IGC_RLPML,
		dev->data->dev_conf.rxmode.max_rx_pkt_len);

write_ext_vlan:
	IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
	return 0;
}
static int
eth_igc_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct rte_eth_rxmode *rxmode;

	rxmode = &dev->data->dev_conf.rxmode;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			igc_vlan_hw_strip_enable(dev);
		else
			igc_vlan_hw_strip_disable(dev);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			igc_vlan_hw_filter_enable(dev);
		else
			igc_vlan_hw_filter_disable(dev);
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
			return igc_vlan_hw_extend_enable(dev);
		else
			return igc_vlan_hw_extend_disable(dev);
	}

	return 0;
}
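
/*
 * Illustrative sketch (compiled out, not part of the driver): turning on
 * VLAN stripping and filtering at runtime. rte_eth_dev_set_vlan_offload()
 * updates rxmode->offloads, computes the changed-bits mask, and calls
 * eth_igc_vlan_offload_set() above.
 */
#ifdef IGC_USAGE_EXAMPLE
static int
example_enable_vlan_offloads(uint16_t port_id)
{
	int flags = ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;

	return rte_eth_dev_set_vlan_offload(port_id, flags);
}
#endif /* IGC_USAGE_EXAMPLE */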
static int
eth_igc_vlan_tpid_set(struct rte_eth_dev *dev,
		enum rte_vlan_type vlan_type,
		uint16_t tpid)
{
	struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
	uint32_t reg_val;

	/* only outer TPID of double VLAN can be configured */
	if (vlan_type == ETH_VLAN_TYPE_OUTER) {
		reg_val = IGC_READ_REG(hw, IGC_VET);
		reg_val = (reg_val & (~IGC_VET_EXT)) |
			((uint32_t)tpid << IGC_VET_EXT_SHIFT);
		IGC_WRITE_REG(hw, IGC_VET, reg_val);
		return 0;
	}

	/* all other TPID values are read-only */
	PMD_DRV_LOG(ERR, "Not supported");
	return -ENOTSUP;
}
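
/*
 * Illustrative sketch (compiled out, not part of the driver): programming
 * 0x88A8 (802.1ad) as the outer TPID. rte_eth_dev_set_vlan_ether_type()
 * reaches eth_igc_vlan_tpid_set(), which only accepts
 * ETH_VLAN_TYPE_OUTER.
 */
#ifdef IGC_USAGE_EXAMPLE
static int
example_set_outer_tpid(uint16_t port_id)
{
	return rte_eth_dev_set_vlan_ether_type(port_id,
			ETH_VLAN_TYPE_OUTER, 0x88A8);
}
#endif /* IGC_USAGE_EXAMPLE */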
static int
eth_igc_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct igc_adapter), eth_igc_dev_init);
}
static int
eth_igc_pci_remove(struct rte_pci_device *pci_dev)
{
	PMD_INIT_FUNC_TRACE();
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_igc_dev_uninit);
}
static struct rte_pci_driver rte_igc_pmd = {
	.id_table = pci_id_igc_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_igc_pci_probe,
	.remove = eth_igc_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_igc, rte_igc_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_igc, pci_id_igc_map);
RTE_PMD_REGISTER_KMOD_DEP(net_igc, "* igb_uio | uio_pci_generic | vfio-pci");